author		Vladimir Davydov <vdavydov@virtuozzo.com>	2016-01-15 19:54:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-15 20:56:32 -0500
commit		8749cfea11f3fffe8f7cad891470a77b36e0185f (patch)
tree		1dc712f15e259ba4ff9c0ec62134440b28e89666 /mm/rmap.c
parent		d965432234db94ee8e185f8fd8e3181bed7459fd (diff)
mm: add page_check_address_transhuge() helper
page_referenced_one() and page_idle_clear_pte_refs_one() duplicate the
code for looking up the pte of a (possibly transhuge) page. Move this
code to a new helper function, page_check_address_transhuge(), and make
the above-mentioned functions use it.

This is just a cleanup; no functional changes are intended.
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Reviewed-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	115
1 file changed, 71 insertions(+), 44 deletions(-)
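The diffstat above is limited to mm/rmap.c, so only the page_referenced_one() half of the cleanup is visible below; the matching conversion of page_idle_clear_pte_refs_one() lives in mm/page_idle.c. The following is a hedged sketch of the calling convention the new helper establishes, modeled on the page_referenced_one() conversion in this diff rather than quoting the actual mm/page_idle.c change; the function name example_clear_young_one() is hypothetical and kernel context is assumed (this is not a standalone program):

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>

/*
 * Hypothetical rmap walker built on page_check_address_transhuge().
 * On success the helper returns with *ptlp held and, for a PTE-mapped
 * page, with *ptep mapped; for a PMD-mapped THP it sets *ptep to NULL.
 */
static int example_clear_young_one(struct page *page,
				   struct vm_area_struct *vma,
				   unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	bool young = false;

	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
		return SWAP_AGAIN;	/* page not mapped here; keep walking */

	if (pte) {
		/* PTE-mapped (possibly a THP subpage): test/clear young on the pte */
		young = ptep_clear_young_notify(vma, address, pte);
		pte_unmap(pte);		/* the helper returned the pte mapped */
	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		/* PMD-mapped THP: pte is NULL, operate on the pmd instead */
		young = pmdp_clear_young_notify(vma, address, pmd);
	} else {
		/* unexpected pmd-mapped page? */
		WARN_ON_ONCE(1);
	}
	spin_unlock(ptl);		/* the helper returned with ptl held */

	if (young)
		clear_page_idle(page);
	return SWAP_AGAIN;
}

Note the asymmetric cleanup this contract imposes on every caller: pte_unmap() only when pte is non-NULL, but spin_unlock() unconditionally, since the helper takes the pmd lock for a huge page and the pte lock otherwise.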
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -798,48 +798,44 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-struct page_referenced_arg {
-	int mapcount;
-	int referenced;
-	unsigned long vm_flags;
-	struct mem_cgroup *memcg;
-};
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * arg: page_referenced_arg will be passed
+ * Check that @page is mapped at @address into @mm. In contrast to
+ * page_check_address(), this function can handle transparent huge pages.
+ *
+ * On success returns true with pte mapped and locked. For PMD-mapped
+ * transparent huge pages *@ptep is set to NULL.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-			unsigned long address, void *arg)
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	int referenced = 0;
-	struct page_referenced_arg *pra = arg;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pte = huge_pte_offset(mm, address);
 		if (!pte)
-			return SWAP_AGAIN;
+			return false;
 
 		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
+		pmd = NULL;
 		goto check_pte;
 	}
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
-		return SWAP_AGAIN;
+		return false;
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return SWAP_AGAIN;
+		return false;
 	pmd = pmd_offset(pud, address);
 
 	if (pmd_trans_huge(*pmd)) {
-		int ret = SWAP_AGAIN;
-
 		ptl = pmd_lock(mm, pmd);
 		if (!pmd_present(*pmd))
 			goto unlock_pmd;
@@ -851,31 +847,23 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (pmd_page(*pmd) != page)
 			goto unlock_pmd;
 
-		if (vma->vm_flags & VM_LOCKED) {
-			pra->vm_flags |= VM_LOCKED;
-			ret = SWAP_FAIL; /* To break the loop */
-			goto unlock_pmd;
-		}
-
-		if (pmdp_clear_flush_young_notify(vma, address, pmd))
-			referenced++;
-		spin_unlock(ptl);
+		pte = NULL;
 		goto found;
 unlock_pmd:
 		spin_unlock(ptl);
-		return ret;
+		return false;
 	} else {
 		pmd_t pmde = *pmd;
 
 		barrier();
 		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
+			return false;
 	}
 map_pte:
 	pte = pte_offset_map(pmd, address);
 	if (!pte_present(*pte)) {
 		pte_unmap(pte);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	ptl = pte_lockptr(mm, pmd);
@@ -884,35 +872,74 @@ check_pte:
 
 	if (!pte_present(*pte)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	/* THP can be referenced by any subpage */
 	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
+found:
+	*ptep = pte;
+	*pmdp = pmd;
+	*ptlp = ptl;
+	return true;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+struct page_referenced_arg {
+	int mapcount;
+	int referenced;
+	unsigned long vm_flags;
+	struct mem_cgroup *memcg;
+};
+/*
+ * arg: page_referenced_arg will be passed
+ */
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct page_referenced_arg *pra = arg;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+	int referenced = 0;
+
+	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
+		return SWAP_AGAIN;
 
 	if (vma->vm_flags & VM_LOCKED) {
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap(pte);
+		spin_unlock(ptl);
 		pra->vm_flags |= VM_LOCKED;
 		return SWAP_FAIL; /* To break the loop */
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such. If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+	if (pte) {
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such. If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+				referenced++;
+		}
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
+	} else {
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
 	}
-	pte_unmap_unlock(pte, ptl);
+	spin_unlock(ptl);
 
-found:
 	if (referenced)
 		clear_page_idle(page);
 	if (test_and_clear_page_young(page))