Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)
@@ -49,6 +49,7 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlbflush.h>
 
@@ -138,7 +139,7 @@ void anon_vma_unlink(struct vm_area_struct *vma)
 	anon_vma_free(anon_vma);
 }
 
-static void anon_vma_ctor(struct kmem_cache *cachep, void *data)
+static void anon_vma_ctor(void *data)
 {
 	struct anon_vma *anon_vma = data;
 
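The constructor loses its struct kmem_cache argument because 2.6.27 dropped that parameter from all slab constructors; almost no ctor used it. For context, a sketch of how this file registers the constructor, mirroring anon_vma_init() in mm/rmap.c of this era (reproduced from memory, not part of the hunk):

static struct kmem_cache *anon_vma_cachep;

void __init anon_vma_init(void)
{
	/* SLAB_DESTROY_BY_RCU lets lockless rmap walkers tolerate object reuse */
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}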
@@ -287,7 +288,7 @@ static int page_referenced_one(struct page *page,
 	if (vma->vm_flags & VM_LOCKED) {
 		referenced++;
 		*mapcount = 1;	/* break early from loop */
-	} else if (ptep_clear_flush_young(vma, address, pte))
+	} else if (ptep_clear_flush_young_notify(vma, address, pte))
 		referenced++;
 
 	/* Pretend the page is referenced if the task has the
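The _notify variant folds an MMU-notifier callout into the young-bit test, so secondary MMUs (KVM being the first user) have their accessed state sampled and cleared along with the primary PTE. A sketch of the wrapper as defined in <linux/mmu_notifier.h> in this series (reproduced from memory):

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	/* clear/flush the young bit in the primary PTE... */		\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	/* ...and merge in the secondary-MMU young state */		\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address);		\
	__young;							\
})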
@@ -421,7 +422,7 @@ int page_referenced(struct page *page, int is_locked,
 		referenced += page_referenced_anon(page, mem_cont);
 	else if (is_locked)
 		referenced += page_referenced_file(page, mem_cont);
-	else if (TestSetPageLocked(page))
+	else if (!trylock_page(page))
 		referenced++;
 	else {
 		if (page->mapping)
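TestSetPageLocked() was removed in 2.6.27 in favour of trylock_page(), which returns true when the lock was acquired (the opposite sense of the old test-and-set result), hence the inverted condition here. Roughly, from <linux/pagemap.h> of this era:

/* returns nonzero iff we took PG_locked; sketch of the 2.6.27 helper */
static inline int trylock_page(struct page *page)
{
	return !test_and_set_bit_lock(PG_locked, &page->flags);
}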
@@ -457,7 +458,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 		pte_t entry;
 
 		flush_cache_page(vma, address, pte_pfn(*pte));
-		entry = ptep_clear_flush(vma, address, pte);
+		entry = ptep_clear_flush_notify(vma, address, pte);
 		entry = pte_wrprotect(entry);
 		entry = pte_mkclean(entry);
 		set_pte_at(mm, address, pte, entry);
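As with the young-bit helper above, the _notify form pairs the PTE clear-and-flush with an invalidate callout so that registered notifiers drop their stale secondary mappings. A sketch of its definition in <linux/mmu_notifier.h> for this series (reproduced from memory):

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	pte_t __pte;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__pte = ptep_clear_flush(___vma, ___address, __ptep);		\
	/* tell secondary MMUs the page is gone from this address */	\
	mmu_notifier_invalidate_page(___vma->vm_mm, ___address);	\
	__pte;								\
})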
@@ -576,14 +577,8 @@ void page_add_anon_rmap(struct page *page,
 	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	else {
+	else
 		__page_check_anon_rmap(page, vma, address);
-		/*
-		 * We unconditionally charged during prepare, we uncharge here
-		 * This takes care of balancing the reference counts
-		 */
-		mem_cgroup_uncharge_page(page);
-	}
 }
 
 /**
@@ -614,12 +609,6 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount))
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-	else
-		/*
-		 * We unconditionally charged during prepare, we uncharge here
-		 * This takes care of balancing the reference counts
-		 */
-		mem_cgroup_uncharge_page(page);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -678,7 +667,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
-	if (page_test_dirty(page)) {
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_dirty(page)) {
 		page_clear_dirty(page);
 		set_page_dirty(page);
 	}
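page_test_dirty()/page_clear_dirty() only do real work on s390, where they probe the storage-key dirty bit; the new guard skips that probe for anonymous pages outside swapcache, whose dirty state is meaningless since they can never be written back. On other architectures the helpers are no-ops anyway, roughly per <asm-generic/pgtable.h> of this era (reproduced from memory):

/* generic fallbacks; only s390 provides the __HAVE_ARCH_* versions */
#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
#define page_test_dirty(page)		(0)
#endif

#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
#define page_clear_dirty(page)		do { } while (0)
#endif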
@@ -717,14 +707,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * skipped over this mm) then we should reactivate it.
 	 */
 	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)))) {
+			(ptep_clear_flush_young_notify(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
 
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
-	pteval = ptep_clear_flush(vma, address, pte);
+	pteval = ptep_clear_flush_notify(vma, address, pte);
 
 	/* Move the dirty bit to the physical page now the pte is gone. */
 	if (pte_dirty(pteval))
@@ -849,12 +839,12 @@ static void try_to_unmap_cluster(unsigned long cursor,
 		page = vm_normal_page(vma, address, *pte);
 		BUG_ON(!page || PageAnon(page));
 
-		if (ptep_clear_flush_young(vma, address, pte))
+		if (ptep_clear_flush_young_notify(vma, address, pte))
 			continue;
 
 		/* Nuke the page table entry. */
 		flush_cache_page(vma, address, pte_pfn(*pte));
-		pteval = ptep_clear_flush(vma, address, pte);
+		pteval = ptep_clear_flush_notify(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
 		if (page->index != linear_page_index(vma, address))
