author     Bob Liu <lliubbo@gmail.com>                       2012-12-11 19:00:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-12-11 20:22:22 -0500
commit     6219049ae1ce32b89236646cccaec2a5fc6c4fd2 (patch)
tree       551e8b1d53b8d237678c96cb9b442c746609cfcd /mm
parent     344aa35c27acdf70d3c67b5aa7cb6aa8585f80c1 (diff)
mm: introduce mm_find_pmd()
Several places need to look up the pmd for a given (mm_struct, address) pair, so introduce a function to simplify it.

[akpm@linux-foundation.org: fix warning]
Signed-off-by: Bob Liu <lliubbo@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Ni zhan Chen <nizhan.chen@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
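To make the change easier to follow, here is a minimal sketch of the caller pattern the hunks below convert to. The enclosing function and its use of the return value are illustrative only and not part of this patch; mm_find_pmd() is the helper added to mm/rmap.c further down, and pmd_trans_huge()/pte_offset_map_lock() are the existing checks the converted callers keep.

    /* Illustrative caller only -- not from this patch. It replaces the
     * open-coded pgd -> pud -> pmd walk with the new helper. */
    static pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long address,
                                     spinlock_t **ptlp)
    {
            pmd_t *pmd;

            pmd = mm_find_pmd(mm, address);   /* NULL if no pmd is present */
            if (!pmd)
                    return NULL;
            if (pmd_trans_huge(*pmd))         /* huge pmd: no pte table to map */
                    return NULL;

            /* the follow-up step several of the converted callers perform */
            return pte_offset_map_lock(mm, pmd, address, ptlp);
    }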
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c   55
-rw-r--r--  mm/internal.h       5
-rw-r--r--  mm/ksm.c           14
-rw-r--r--  mm/migrate.c       14
-rw-r--r--  mm/rmap.c          48
5 files changed, 44 insertions(+), 92 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6f022f505e8..9ae97242aa8 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1146,22 +1146,14 @@ pmd_t *page_check_address_pmd(struct page *page,
                               unsigned long address,
                               enum page_check_address_pmd_flag flag)
 {
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd, *ret = NULL;
 
         if (address & ~HPAGE_PMD_MASK)
                 goto out;
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                goto out;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                goto out;
-
-        pmd = pmd_offset(pud, address);
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
+                goto out;
         if (pmd_none(*pmd))
                 goto out;
         if (pmd_page(*pmd) != page)
@@ -1908,8 +1900,6 @@ static void collapse_huge_page(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    int node)
 {
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd, _pmd;
         pte_t *pte;
         pgtable_t pgtable;
@@ -1955,17 +1945,10 @@ static void collapse_huge_page(struct mm_struct *mm,
                 goto out;
         VM_BUG_ON(vma->vm_flags & VM_NO_THP);
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                goto out;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                goto out;
-
-        pmd = pmd_offset(pud, address);
-        /* pmd can't go away or become huge under us */
-        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
+                goto out;
+        if (pmd_trans_huge(*pmd))
                 goto out;
 
         anon_vma_lock(vma->anon_vma);
@@ -2048,8 +2031,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                                unsigned long address,
                                struct page **hpage)
 {
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte, *_pte;
         int ret = 0, referenced = 0, none = 0;
@@ -2060,16 +2041,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 
         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                goto out;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                goto out;
-
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
+                goto out;
+        if (pmd_trans_huge(*pmd))
                 goto out;
 
         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2363,22 +2338,12 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 static void split_huge_page_address(struct mm_struct *mm,
                                     unsigned long address)
 {
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
 
         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                return;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                return;
-
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd))
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
                 return;
         /*
          * Caller holds the mmap_sem write mode, so a huge pmd cannot
diff --git a/mm/internal.h b/mm/internal.h
index a4fa284f6bc..52d1fa95719 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -92,6 +92,11 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 
 /*
+ * in mm/rmap.c:
+ */
+extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
+
+/*
  * in mm/page_alloc.c
  */
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
diff --git a/mm/ksm.c b/mm/ksm.c
index ae539f0b8aa..31ae5ea1eac 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -778,8 +778,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
                         struct page *kpage, pte_t orig_pte)
 {
         struct mm_struct *mm = vma->vm_mm;
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
         pte_t *ptep;
         spinlock_t *ptl;
@@ -792,18 +790,10 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
         if (addr == -EFAULT)
                 goto out;
 
-        pgd = pgd_offset(mm, addr);
-        if (!pgd_present(*pgd))
-                goto out;
-
-        pud = pud_offset(pgd, addr);
-        if (!pud_present(*pud))
-                goto out;
-
-        pmd = pmd_offset(pud, addr);
+        pmd = mm_find_pmd(mm, addr);
+        if (!pmd)
+                goto out;
         BUG_ON(pmd_trans_huge(*pmd));
-        if (!pmd_present(*pmd))
-                goto out;
 
         mmun_start = addr;
         mmun_end   = addr + PAGE_SIZE;
diff --git a/mm/migrate.c b/mm/migrate.c
index 77ed2d77370..1dc4598d251 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -91,8 +91,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 {
         struct mm_struct *mm = vma->vm_mm;
         swp_entry_t entry;
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
         pte_t *ptep, pte;
         spinlock_t *ptl;
@@ -103,19 +101,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                         goto out;
                 ptl = &mm->page_table_lock;
         } else {
-                pgd = pgd_offset(mm, addr);
-                if (!pgd_present(*pgd))
-                        goto out;
-
-                pud = pud_offset(pgd, addr);
-                if (!pud_present(*pud))
-                        goto out;
-
-                pmd = pmd_offset(pud, addr);
+                pmd = mm_find_pmd(mm, addr);
+                if (!pmd)
+                        goto out;
                 if (pmd_trans_huge(*pmd))
                         goto out;
-                if (!pmd_present(*pmd))
-                        goto out;
 
                 ptep = pte_offset_map(pmd, addr);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ee1ef0f317..46823fb0e80 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,6 +562,27 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
         return address;
 }
 
+pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd = NULL;
+
+        pgd = pgd_offset(mm, address);
+        if (!pgd_present(*pgd))
+                goto out;
+
+        pud = pud_offset(pgd, address);
+        if (!pud_present(*pud))
+                goto out;
+
+        pmd = pmd_offset(pud, address);
+        if (!pmd_present(*pmd))
+                pmd = NULL;
+out:
+        return pmd;
+}
+
 /*
  * Check that @page is mapped at @address into @mm.
  *
@@ -574,8 +595,6 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                           unsigned long address, spinlock_t **ptlp, int sync)
 {
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
         spinlock_t *ptl;
@@ -586,17 +605,10 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                 goto check;
         }
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                return NULL;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
                 return NULL;
 
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd))
-                return NULL;
         if (pmd_trans_huge(*pmd))
                 return NULL;
 
@@ -1345,8 +1357,6 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
                 struct vm_area_struct *vma, struct page *check_page)
 {
         struct mm_struct *mm = vma->vm_mm;
-        pgd_t *pgd;
-        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
         pte_t pteval;
@@ -1366,16 +1376,8 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
         if (end > vma->vm_end)
                 end = vma->vm_end;
 
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                return ret;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                return ret;
-
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd))
+        pmd = mm_find_pmd(mm, address);
+        if (!pmd)
                 return ret;
 
         mmun_start = address;