Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 61 insertions(+), 30 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index c95d2ba27a0b..f21f4a1d6a1c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -177,6 +177,10 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
 	list_add(&avc->same_vma, &vma->anon_vma_chain);
 
 	anon_vma_lock(anon_vma);
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
 	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
 	anon_vma_unlock(anon_vma);
 }
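
The hunk adds only a comment, but the comment records a real ordering invariant that huge page splitting depends on. A minimal standalone sketch of the two insertion primitives involved, modelled on the kernel's circular list.h semantics (the model_ names are this sketch's own, not kernel API):

    /* On a circular list headed at anon_vma->head, insertion position
     * decides traversal order: list_add() pushes right after the head
     * (newest vma seen first), list_add_tail() appends right before it
     * (oldest vma seen first), which is what the comment demands. */
    struct list_head { struct list_head *next, *prev; };

    static void model_insert_between(struct list_head *n,
                                     struct list_head *prev,
                                     struct list_head *next)
    {
        next->prev = n;
        n->next = next;
        n->prev = prev;
        prev->next = n;
    }

    /* like list_add(): newest-first traversal from the head */
    static void model_list_add(struct list_head *n, struct list_head *head)
    {
        model_insert_between(n, head, head->next);
    }

    /* like list_add_tail(): oldest-first traversal from the head */
    static void model_list_add_tail(struct list_head *n, struct list_head *head)
    {
        model_insert_between(n, head->prev, head);
    }
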
@@ -360,7 +364,7 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
  * Returns virtual address or -EFAULT if page's index/offset is not
  * within the range mapped the @vma.
  */
-static inline unsigned long
+inline unsigned long
 vma_address(struct page *page, struct vm_area_struct *vma)
 {
 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
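
Dropping static makes vma_address() usable outside mm/rmap.c; the arithmetic itself is a linear window mapping from a page's index to a user virtual address. A self-contained model of it, assuming 4 KiB pages (and noting that PAGE_CACHE_SHIFT equals PAGE_SHIFT here, so the pgoff conversion shift is zero):

    #include <stdint.h>

    #define MODEL_PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    /* A vma maps a contiguous window of an object: the page's index
     * minus the vma's starting index (vm_pgoff) gives the page offset
     * inside the window, scaled up to bytes and added to vm_start. */
    static uint64_t model_vma_address(uint64_t page_index, uint64_t vm_pgoff,
                                      uint64_t vm_start, uint64_t vm_end)
    {
        uint64_t address = vm_start +
                           ((page_index - vm_pgoff) << MODEL_PAGE_SHIFT);

        /* unsigned wraparound when page_index < vm_pgoff lands here too */
        if (address < vm_start || address >= vm_end)
            return (uint64_t)-14;   /* stand-in for -EFAULT */
        return address;
    }
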
@@ -435,6 +439,8 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
 	pmd = pmd_offset(pud, address);
 	if (!pmd_present(*pmd))
 		return NULL;
+	if (pmd_trans_huge(*pmd))
+		return NULL;
 
 	pte = pte_offset_map(pmd, address);
 	/* Make a quick check before getting the lock */
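
__page_check_address() hands back a mapped pte, and no pte level exists below a transparent huge pmd, so the walk must now give up one level early. An illustrative standalone model of the failure the new check prevents (the names are this sketch's, not kernel API):

    #include <stddef.h>
    #include <stdint.h>

    #define MODEL_HUGE_BIT (1ull << 7)   /* x86 calls this bit PS/PSE */

    typedef uint64_t model_pmd_t;

    /* A pmd entry either points at a table of ptes or, with the huge
     * bit set, maps a large frame directly. Following a huge entry as
     * if it were a table pointer would dereference page contents as
     * page-table entries, so the lookup must return NULL instead. */
    static uint64_t *model_pte_table_of(model_pmd_t pmd)
    {
        if (pmd & MODEL_HUGE_BIT)
            return NULL;                 /* no pte level under a huge pmd */
        return (uint64_t *)(uintptr_t)(pmd & ~0xfffull);
    }
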
@@ -489,35 +495,17 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long *vm_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
-	spinlock_t *ptl;
 	int referenced = 0;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
-	if (!pte)
-		goto out;
-
 	/*
 	 * Don't want to elevate referenced for mlocked page that gets this far,
 	 * in order that it progresses to try_to_unmap and is moved to the
 	 * unevictable list.
 	 */
 	if (vma->vm_flags & VM_LOCKED) {
-		*mapcount = 1;	/* break early from loop */
+		*mapcount = 0;	/* break early from loop */
 		*vm_flags |= VM_LOCKED;
-		goto out_unmap;
-	}
-
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such. If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!VM_SequentialReadHint(vma)))
-			referenced++;
+		goto out;
 	}
 
 	/* Pretend the page is referenced if the task has the
@@ -526,9 +514,39 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			rwsem_is_locked(&mm->mmap_sem))
 		referenced++;
 
-out_unmap:
+	if (unlikely(PageTransHuge(page))) {
+		pmd_t *pmd;
+
+		spin_lock(&mm->page_table_lock);
+		pmd = page_check_address_pmd(page, mm, address,
+					     PAGE_CHECK_ADDRESS_PMD_FLAG);
+		if (pmd && !pmd_trans_splitting(*pmd) &&
+		    pmdp_clear_flush_young_notify(vma, address, pmd))
+			referenced++;
+		spin_unlock(&mm->page_table_lock);
+	} else {
+		pte_t *pte;
+		spinlock_t *ptl;
+
+		pte = page_check_address(page, mm, address, &ptl, 0);
+		if (!pte)
+			goto out;
+
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such. If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!VM_SequentialReadHint(vma)))
+				referenced++;
+		}
+		pte_unmap_unlock(pte, ptl);
+	}
+
 	(*mapcount)--;
-	pte_unmap_unlock(pte, ptl);
 
 	if (referenced)
 		*vm_flags |= vma->vm_flags;
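
Taken together, these two hunks restructure page_referenced_one() rather than extend it: huge pages get a pmd-level young-bit test under page_table_lock (skipped while a split is in progress, per pmd_trans_splitting()), while base pages keep the old pte path, now inside the else branch. The VM_LOCKED tweak is subtler: *mapcount becomes 0 instead of 1 and the decrement is skipped, with the same net effect of ending the caller's walk. A standalone sketch of that control flow, with the page-table details abstracted away (model_ names are this sketch's own):

    #include <stdbool.h>

    struct model_vma { bool vm_locked; bool young; };

    /* Returns the "referenced" count contributed by one mapping and
     * decrements *mapcount unless it short-circuits, mirroring the
     * patched function's flow. Callers in the page_referenced_anon()
     * style walk all mappings and stop once *mapcount reaches zero. */
    static int model_referenced_one(struct model_vma *vma, int *mapcount)
    {
        int referenced = 0;

        if (vma->vm_locked) {
            *mapcount = 0;      /* caller's walk sees 0 and stops */
            return 0;
        }
        if (vma->young)         /* stand-in for the pmd/pte young test */
            referenced++;
        (*mapcount)--;
        return referenced;
    }
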
@@ -864,8 +882,13 @@ void do_page_add_anon_rmap(struct page *page,
 			struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
 	int first = atomic_inc_and_test(&page->_mapcount);
-	if (first)
-		__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (first) {
+		if (!PageTransHuge(page))
+			__inc_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__inc_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
+	}
 	if (unlikely(PageKsm(page)))
 		return;
 
@@ -893,7 +916,10 @@ void page_add_new_anon_rmap(struct page *page,
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	SetPageSwapBacked(page);
 	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-	__inc_zone_page_state(page, NR_ANON_PAGES);
+	if (!PageTransHuge(page))
+		__inc_zone_page_state(page, NR_ANON_PAGES);
+	else
+		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__page_set_anon_rmap(page, vma, address, 1);
 	if (page_evictable(page, vma))
 		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
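
With these hunks, both anon rmap-add paths keep transparent huge pages out of NR_ANON_PAGES and instead count each one once in NR_ANON_TRANSPARENT_HUGEPAGES. A consumer that wants total anonymous memory therefore has to combine the two counters itself; a hedged sketch, assuming x86-64's 512 base pages per 2 MiB huge page (a property of the architecture, not stated in this hunk):

    #include <stdint.h>

    #define MODEL_PAGE_SIZE 4096ull
    #define MODEL_HPAGE_NR  512ull   /* 2 MiB / 4 KiB on x86-64 */

    /* Total anonymous bytes = base anon pages plus whole huge pages,
     * each huge page accounting for MODEL_HPAGE_NR base pages. */
    static uint64_t model_anon_bytes(uint64_t nr_anon_pages,
                                     uint64_t nr_anon_thp)
    {
        return nr_anon_pages * MODEL_PAGE_SIZE +
               nr_anon_thp * MODEL_HPAGE_NR * MODEL_PAGE_SIZE;
    }
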
@@ -911,7 +937,7 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
 
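
The memcg call switches from a dedicated file-mapped updater taking a +1/-1 argument to a generic per-page statistics interface indexed by an enum, with inc/dec wrappers naming the direction at the call site. A standalone sketch of that API shape (a model only; the kernel's real definitions live in the memcontrol code):

    enum model_memcg_stat { MODEL_NR_FILE_MAPPED, MODEL_NR_STATS };

    static long model_stats[MODEL_NR_STATS];

    /* one generic updater, selected by enum rather than by function name */
    static void model_memcg_update_stat(enum model_memcg_stat idx, int val)
    {
        model_stats[idx] += val;
    }

    static void model_memcg_inc_stat(enum model_memcg_stat idx)
    {
        model_memcg_update_stat(idx, 1);
    }

    static void model_memcg_dec_stat(enum model_memcg_stat idx)
    {
        model_memcg_update_stat(idx, -1);
    }
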
@@ -946,10 +972,14 @@ void page_remove_rmap(struct page *page)
 		return;
 	if (PageAnon(page)) {
 		mem_cgroup_uncharge_page(page);
-		__dec_zone_page_state(page, NR_ANON_PAGES);
+		if (!PageTransHuge(page))
+			__dec_zone_page_state(page, NR_ANON_PAGES);
+		else
+			__dec_zone_page_state(page,
+					      NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
@@ -1202,7 +1232,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
-static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+bool is_vma_temporary_stack(struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 
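
Dropping static publishes is_vma_temporary_stack() beyond mm/rmap.c, so code elsewhere in mm can call it once a declaration is visible. A sketch of what that kernel-style declaration looks like (the header the series actually uses is not shown in this hunk):

    /* somewhere visible to both mm/rmap.c and the new caller: */
    struct vm_area_struct;
    bool is_vma_temporary_stack(struct vm_area_struct *vma);
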
@@ -1400,6 +1430,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	int ret;
 
 	BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
 	if (unlikely(PageKsm(page)))
 		ret = try_to_unmap_ksm(page, flags);
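
The new assertion pins down an entry condition: by the time try_to_unmap() runs, any transparent huge page must already have been split into base pages. PageTransHuge() is essentially a compound-head test in this era of the code, and hugetlbfs pages are compound as well, hence the !PageHuge() exemption. A minimal model of the check (illustrative names only):

    #include <assert.h>
    #include <stdbool.h>

    /* try_to_unmap() handles base pages and hugetlbfs pages; a compound
     * page that is not hugetlbfs (i.e. a THP) must be split first. */
    static void model_try_to_unmap_entry(bool page_huge, bool page_trans_huge)
    {
        assert(!(!page_huge && page_trans_huge));
    }
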