author     Nikita Danilov <nikita@clusterfs.com>        2005-05-01 11:58:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-01 11:58:36 -0400
commit     81b4082dc7666e2bc5ec229d8e837f3bafb96883
tree       d37c73b9fa3d3d321d0997113c9170b52aeb10b6
parent     119f657c72fc07d6fd28c61de59cfba1566970a9
[PATCH] mm: rmap.c cleanup
mm/rmap.c:page_referenced_one() and mm/rmap.c:try_to_unmap_one() contain
identical code that

 - takes mm->page_table_lock;
 - drills through the page tables;
 - checks that the correct pte is reached.

Coalesce this into page_check_address().
Signed-off-by: Nikita Danilov <nikita@clusterfs.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/rmap.c | 113
1 file changed, 50 insertions(+), 63 deletions(-)
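
Before the diff itself, here is an editorial sketch of the calling convention the new helper establishes. The wrapper name use_mapped_pte() is hypothetical and its body is abbreviated; the identifiers, the ERR_PTR(-ENOENT) failure value, and the locking behaviour are taken from the hunks below, so this is an illustration rather than an excerpt of mm/rmap.c.

```c
/*
 * Editorial sketch, not kernel source.  use_mapped_pte() stands in for
 * page_referenced_one()/try_to_unmap_one() after the cleanup.
 */
static int use_mapped_pte(struct page *page, struct mm_struct *mm,
                          unsigned long address)
{
        pte_t *pte;

        pte = page_check_address(page, mm, address);
        if (IS_ERR(pte))
                return 0;       /* not mapped at this address; no lock is held */

        /* ... inspect or tear down the mapping through *pte ... */

        pte_unmap(pte);                         /* the caller unmaps the pte ... */
        spin_unlock(&mm->page_table_lock);      /* ... and drops the lock the helper took */
        return 1;
}
```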
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -243,6 +243,42 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 }
 
 /*
+ * Check that @page is mapped at @address into @mm.
+ *
+ * On success returns with mapped pte and locked mm->page_table_lock.
+ */
+static pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+                        unsigned long address)
+{
+        pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
+        pte_t *pte;
+
+        /*
+         * We need the page_table_lock to protect us from page faults,
+         * munmap, fork, etc...
+         */
+        spin_lock(&mm->page_table_lock);
+        pgd = pgd_offset(mm, address);
+        if (likely(pgd_present(*pgd))) {
+                pud = pud_offset(pgd, address);
+                if (likely(pud_present(*pud))) {
+                        pmd = pmd_offset(pud, address);
+                        if (likely(pmd_present(*pmd))) {
+                                pte = pte_offset_map(pmd, address);
+                                if (likely(pte_present(*pte) &&
+                                    page_to_pfn(page) == pte_pfn(*pte)))
+                                        return pte;
+                                pte_unmap(pte);
+                        }
+                }
+        }
+        spin_unlock(&mm->page_table_lock);
+        return ERR_PTR(-ENOENT);
+}
+
+/*
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
@@ -251,9 +287,6 @@ static int page_referenced_one(struct page *page,
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long address;
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
         pte_t *pte;
         int referenced = 0;
 
@@ -263,39 +296,18 @@ static int page_referenced_one(struct page *page,
         if (address == -EFAULT)
                 goto out;
 
-        spin_lock(&mm->page_table_lock);
-
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                goto out_unlock;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                goto out_unlock;
-
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd))
-                goto out_unlock;
-
-        pte = pte_offset_map(pmd, address);
-        if (!pte_present(*pte))
-                goto out_unmap;
-
-        if (page_to_pfn(page) != pte_pfn(*pte))
-                goto out_unmap;
-
-        if (ptep_clear_flush_young(vma, address, pte))
-                referenced++;
-
-        if (mm != current->mm && !ignore_token && has_swap_token(mm))
-                referenced++;
+        pte = page_check_address(page, mm, address);
+        if (!IS_ERR(pte)) {
+                if (ptep_clear_flush_young(vma, address, pte))
+                        referenced++;
 
-        (*mapcount)--;
+                if (mm != current->mm && !ignore_token && has_swap_token(mm))
+                        referenced++;
 
-out_unmap:
-        pte_unmap(pte);
-out_unlock:
-        spin_unlock(&mm->page_table_lock);
+                (*mapcount)--;
+                pte_unmap(pte);
+                spin_unlock(&mm->page_table_lock);
+        }
 out:
         return referenced;
 }
@@ -502,9 +514,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long address;
-        pgd_t *pgd;
-        pud_t *pud;
-        pmd_t *pmd;
         pte_t *pte;
         pte_t pteval;
         int ret = SWAP_AGAIN;
@@ -515,30 +524,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
         if (address == -EFAULT)
                 goto out;
 
-        /*
-         * We need the page_table_lock to protect us from page faults,
-         * munmap, fork, etc...
-         */
-        spin_lock(&mm->page_table_lock);
-
-        pgd = pgd_offset(mm, address);
-        if (!pgd_present(*pgd))
-                goto out_unlock;
-
-        pud = pud_offset(pgd, address);
-        if (!pud_present(*pud))
-                goto out_unlock;
-
-        pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd))
-                goto out_unlock;
-
-        pte = pte_offset_map(pmd, address);
-        if (!pte_present(*pte))
-                goto out_unmap;
-
-        if (page_to_pfn(page) != pte_pfn(*pte))
-                goto out_unmap;
+        pte = page_check_address(page, mm, address);
+        if (IS_ERR(pte))
+                goto out;
 
         /*
          * If the page is mlock()d, we cannot swap it out.
@@ -604,7 +592,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
 
 out_unmap:
         pte_unmap(pte);
-out_unlock:
         spin_unlock(&mm->page_table_lock);
 out:
         return ret;
@@ -708,7 +695,6 @@ static void try_to_unmap_cluster(unsigned long cursor,
         }
 
         pte_unmap(pte);
-
 out_unlock:
         spin_unlock(&mm->page_table_lock);
 }
@@ -860,3 +846,4 @@ int try_to_unmap(struct page *page)
                 ret = SWAP_SUCCESS;
         return ret;
 }
+
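
A note on the error-handling idiom the patch relies on: page_check_address() signals "page not mapped here" by returning ERR_PTR(-ENOENT), and the rewritten callers test the result with IS_ERR(). The sketch below reimplements just enough of the kernel's error-pointer helpers (modelled on include/linux/err.h) as ordinary userspace C to show how a small negative errno travels inside a pointer value; it is an illustration under those assumptions, not the kernel's own code.

```c
/*
 * Userspace illustration of the ERR_PTR()/IS_ERR() convention used by
 * page_check_address().  Modelled on include/linux/err.h; not kernel code.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
        /* Encode a small negative errno as a pointer value that cannot be a valid address. */
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical lookup in the style of page_check_address(): a result or an errno. */
static int *lookup(int found)
{
        static int value = 42;

        return found ? &value : ERR_PTR(-ENOENT);
}

int main(void)
{
        int *p = lookup(0);

        if (IS_ERR(p))
                printf("lookup failed, errno %ld\n", -PTR_ERR(p));
        else
                printf("lookup succeeded, value %d\n", *p);
        return 0;
}
```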