author      Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2017-02-24 17:58:13 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>          2017-02-24 20:46:55 -0500
commit      d53a8b49a626fdfce4390710da6d04b4314db25f (patch)
tree        088d3dd8cbab66ae7f2b13c4070161e18f579ca8 /mm
parent      6a328a626f98bb856551e506cabc7c8b969aafa3 (diff)
mm: drop page_check_address{,_transhuge}
All users are gone. Let's drop them.
Link: http://lkml.kernel.org/r/20170129173858.45174-12-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
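
Not stated in this patch, but useful context, hedged and reconstructed from the rest of the series rather than from this commit: the callers of page_check_address() and page_check_address_transhuge() were converted by the earlier patches in the series to the page_vma_mapped_walk() iterator, which is why no users remain here. A minimal sketch of that caller pattern under this assumption; the wrapper name page_is_mapped_at() is invented for illustration and is not in the tree:

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Hedged sketch only: check whether @page is mapped at @address in @vma
 * using the walk-based interface that replaced the helpers deleted below.
 */
static bool page_is_mapped_at(struct page *page, struct vm_area_struct *vma,
			      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	bool found = false;

	/*
	 * Each iteration returns with pvmw.pte mapped and locked, or with
	 * pvmw.pte == NULL and pvmw.pmd/pvmw.ptl set for a PMD-mapped THP.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		found = true;
		/* Breaking out early: release the lock/mapping ourselves. */
		page_vma_mapped_walk_done(&pvmw);
		break;
	}

	return found;
}

Unlike the helpers deleted below, the walk takes a vma rather than a bare mm, and it can report a compound page mapped by several ptes as well as by a pmd; a caller that leaves the loop early must call page_vma_mapped_walk_done() so the page table lock is dropped.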
Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c | 138
1 files changed, 0 insertions, 138 deletions
@@ -708,144 +708,6 @@ out:
 	return pmd;
 }
 
-/*
- * Check that @page is mapped at @address into @mm.
- *
- * If @sync is false, page_check_address may perform a racy check to avoid
- * the page table lock when the pte is not present (helpful when reclaiming
- * highly shared pages).
- *
- * On success returns with pte mapped and locked.
- */
-pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
-			    unsigned long address, spinlock_t **ptlp, int sync)
-{
-	pmd_t *pmd;
-	pte_t *pte;
-	spinlock_t *ptl;
-
-	if (unlikely(PageHuge(page))) {
-		/* when pud is not present, pte will be NULL */
-		pte = huge_pte_offset(mm, address);
-		if (!pte)
-			return NULL;
-
-		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
-		goto check;
-	}
-
-	pmd = mm_find_pmd(mm, address);
-	if (!pmd)
-		return NULL;
-
-	pte = pte_offset_map(pmd, address);
-	/* Make a quick check before getting the lock */
-	if (!sync && !pte_present(*pte)) {
-		pte_unmap(pte);
-		return NULL;
-	}
-
-	ptl = pte_lockptr(mm, pmd);
-check:
-	spin_lock(ptl);
-	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
-		*ptlp = ptl;
-		return pte;
-	}
-	pte_unmap_unlock(pte, ptl);
-	return NULL;
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * Check that @page is mapped at @address into @mm. In contrast to
- * page_check_address(), this function can handle transparent huge pages.
- *
- * On success returns true with pte mapped and locked. For PMD-mapped
- * transparent huge pages *@ptep is set to NULL.
- */
-bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
-				  unsigned long address, pmd_t **pmdp,
-				  pte_t **ptep, spinlock_t **ptlp)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	spinlock_t *ptl;
-
-	if (unlikely(PageHuge(page))) {
-		/* when pud is not present, pte will be NULL */
-		pte = huge_pte_offset(mm, address);
-		if (!pte)
-			return false;
-
-		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
-		pmd = NULL;
-		goto check_pte;
-	}
-
-	pgd = pgd_offset(mm, address);
-	if (!pgd_present(*pgd))
-		return false;
-	pud = pud_offset(pgd, address);
-	if (!pud_present(*pud))
-		return false;
-	pmd = pmd_offset(pud, address);
-
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(mm, pmd);
-		if (!pmd_present(*pmd))
-			goto unlock_pmd;
-		if (unlikely(!pmd_trans_huge(*pmd))) {
-			spin_unlock(ptl);
-			goto map_pte;
-		}
-
-		if (pmd_page(*pmd) != page)
-			goto unlock_pmd;
-
-		pte = NULL;
-		goto found;
-unlock_pmd:
-		spin_unlock(ptl);
-		return false;
-	} else {
-		pmd_t pmde = *pmd;
-
-		barrier();
-		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return false;
-	}
-map_pte:
-	pte = pte_offset_map(pmd, address);
-	if (!pte_present(*pte)) {
-		pte_unmap(pte);
-		return false;
-	}
-
-	ptl = pte_lockptr(mm, pmd);
-check_pte:
-	spin_lock(ptl);
-
-	if (!pte_present(*pte)) {
-		pte_unmap_unlock(pte, ptl);
-		return false;
-	}
-
-	/* THP can be referenced by any subpage */
-	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
-		pte_unmap_unlock(pte, ptl);
-		return false;
-	}
-found:
-	*ptep = pte;
-	*pmdp = pmd;
-	*ptlp = ptl;
-	return true;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 struct page_referenced_arg {
 	int mapcount;
 	int referenced;
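
For reference, the contract the deleted helpers gave their callers, taken from the comments removed above: on success __page_check_address() returned the pte mapped and locked, and it was the caller's job to release it. A hypothetical caller sketch built only from that contract; the wrapper name page_mapped_in_mm() is invented for illustration and is not in the tree:

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Illustrative only: how a caller of the removed helper looked.
 * sync == 0 allows the documented racy !pte_present() fast path that
 * avoids taking the page table lock when the pte is not present.
 */
static bool page_mapped_in_mm(struct page *page, struct mm_struct *mm,
			      unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = __page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return false;

	/* ... inspect or modify *pte here while ptl is held ... */

	pte_unmap_unlock(pte, ptl);	/* caller unmaps and unlocks */
	return true;
}

page_check_address_transhuge() followed the same pattern but reported the mapping through *ptep and *pmdp, setting *ptep to NULL for a PMD-mapped THP, so its callers released the lock with either pte_unmap_unlock() or a plain spin_unlock() depending on which level was found.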