diff options
author | Hugh Dickins <hugh.dickins@tiscali.co.uk> | 2009-12-14 20:59:01 -0500
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 11:53:17 -0500
commit | 1cb1729b1385884648170d9d1d3aa0c66780d64b (patch)
tree | 57b9e58d70f791f2737997bf56a2dad98378d54d /mm/rmap.c
parent | af8e3354b4bbd1ee5a3a55d11a5e1fe37e77f0ba (diff)
mm: pass address down to rmap ones
KSM swapping will know where page_referenced_one() and try_to_unmap_one()
should look. It could hack page->index to get them to do what it wants,
but it seems cleaner now to pass the address down to them.
Make the same change to page_mkclean_one(), since it follows the same
pattern; but there's no real need in its case.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 53
1 file changed, 27 insertions(+), 26 deletions(-)
@@ -336,21 +336,15 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) | |||
336 | * Subfunctions of page_referenced: page_referenced_one called | 336 | * Subfunctions of page_referenced: page_referenced_one called |
337 | * repeatedly from either page_referenced_anon or page_referenced_file. | 337 | * repeatedly from either page_referenced_anon or page_referenced_file. |
338 | */ | 338 | */ |
339 | static int page_referenced_one(struct page *page, | 339 | static int page_referenced_one(struct page *page, struct vm_area_struct *vma, |
340 | struct vm_area_struct *vma, | 340 | unsigned long address, unsigned int *mapcount, |
341 | unsigned int *mapcount, | ||
342 | unsigned long *vm_flags) | 341 | unsigned long *vm_flags) |
343 | { | 342 | { |
344 | struct mm_struct *mm = vma->vm_mm; | 343 | struct mm_struct *mm = vma->vm_mm; |
345 | unsigned long address; | ||
346 | pte_t *pte; | 344 | pte_t *pte; |
347 | spinlock_t *ptl; | 345 | spinlock_t *ptl; |
348 | int referenced = 0; | 346 | int referenced = 0; |
349 | 347 | ||
350 | address = vma_address(page, vma); | ||
351 | if (address == -EFAULT) | ||
352 | goto out; | ||
353 | |||
354 | pte = page_check_address(page, mm, address, &ptl, 0); | 348 | pte = page_check_address(page, mm, address, &ptl, 0); |
355 | if (!pte) | 349 | if (!pte) |
356 | goto out; | 350 | goto out; |
@@ -409,6 +403,9 @@ static int page_referenced_anon(struct page *page, | |||
409 | 403 | ||
410 | mapcount = page_mapcount(page); | 404 | mapcount = page_mapcount(page); |
411 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { | 405 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
406 | unsigned long address = vma_address(page, vma); | ||
407 | if (address == -EFAULT) | ||
408 | continue; | ||
412 | /* | 409 | /* |
413 | * If we are reclaiming on behalf of a cgroup, skip | 410 | * If we are reclaiming on behalf of a cgroup, skip |
414 | * counting on behalf of references from different | 411 | * counting on behalf of references from different |
@@ -416,7 +413,7 @@ static int page_referenced_anon(struct page *page, | |||
416 | */ | 413 | */ |
417 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) | 414 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) |
418 | continue; | 415 | continue; |
419 | referenced += page_referenced_one(page, vma, | 416 | referenced += page_referenced_one(page, vma, address, |
420 | &mapcount, vm_flags); | 417 | &mapcount, vm_flags); |
421 | if (!mapcount) | 418 | if (!mapcount) |
422 | break; | 419 | break; |
@@ -474,6 +471,9 @@ static int page_referenced_file(struct page *page, | |||
474 | mapcount = page_mapcount(page); | 471 | mapcount = page_mapcount(page); |
475 | 472 | ||
476 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 473 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
474 | unsigned long address = vma_address(page, vma); | ||
475 | if (address == -EFAULT) | ||
476 | continue; | ||
477 | /* | 477 | /* |
478 | * If we are reclaiming on behalf of a cgroup, skip | 478 | * If we are reclaiming on behalf of a cgroup, skip |
479 | * counting on behalf of references from different | 479 | * counting on behalf of references from different |
@@ -481,7 +481,7 @@ static int page_referenced_file(struct page *page, | |||
481 | */ | 481 | */ |
482 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) | 482 | if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont)) |
483 | continue; | 483 | continue; |
484 | referenced += page_referenced_one(page, vma, | 484 | referenced += page_referenced_one(page, vma, address, |
485 | &mapcount, vm_flags); | 485 | &mapcount, vm_flags); |
486 | if (!mapcount) | 486 | if (!mapcount) |
487 | break; | 487 | break; |
@@ -535,18 +535,14 @@ int page_referenced(struct page *page, | |||
535 | return referenced; | 535 | return referenced; |
536 | } | 536 | } |
537 | 537 | ||
538 | static int page_mkclean_one(struct page *page, struct vm_area_struct *vma) | 538 | static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, |
539 | unsigned long address) | ||
539 | { | 540 | { |
540 | struct mm_struct *mm = vma->vm_mm; | 541 | struct mm_struct *mm = vma->vm_mm; |
541 | unsigned long address; | ||
542 | pte_t *pte; | 542 | pte_t *pte; |
543 | spinlock_t *ptl; | 543 | spinlock_t *ptl; |
544 | int ret = 0; | 544 | int ret = 0; |
545 | 545 | ||
546 | address = vma_address(page, vma); | ||
547 | if (address == -EFAULT) | ||
548 | goto out; | ||
549 | |||
550 | pte = page_check_address(page, mm, address, &ptl, 1); | 546 | pte = page_check_address(page, mm, address, &ptl, 1); |
551 | if (!pte) | 547 | if (!pte) |
552 | goto out; | 548 | goto out; |
@@ -578,8 +574,12 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page) | |||
578 | 574 | ||
579 | spin_lock(&mapping->i_mmap_lock); | 575 | spin_lock(&mapping->i_mmap_lock); |
580 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 576 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
581 | if (vma->vm_flags & VM_SHARED) | 577 | if (vma->vm_flags & VM_SHARED) { |
582 | ret += page_mkclean_one(page, vma); | 578 | unsigned long address = vma_address(page, vma); |
579 | if (address == -EFAULT) | ||
580 | continue; | ||
581 | ret += page_mkclean_one(page, vma, address); | ||
582 | } | ||
583 | } | 583 | } |
584 | spin_unlock(&mapping->i_mmap_lock); | 584 | spin_unlock(&mapping->i_mmap_lock); |
585 | return ret; | 585 | return ret; |
@@ -761,19 +761,14 @@ void page_remove_rmap(struct page *page) | |||
761 | * repeatedly from either try_to_unmap_anon or try_to_unmap_file. | 761 | * repeatedly from either try_to_unmap_anon or try_to_unmap_file. |
762 | */ | 762 | */ |
763 | static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | 763 | static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, |
764 | enum ttu_flags flags) | 764 | unsigned long address, enum ttu_flags flags) |
765 | { | 765 | { |
766 | struct mm_struct *mm = vma->vm_mm; | 766 | struct mm_struct *mm = vma->vm_mm; |
767 | unsigned long address; | ||
768 | pte_t *pte; | 767 | pte_t *pte; |
769 | pte_t pteval; | 768 | pte_t pteval; |
770 | spinlock_t *ptl; | 769 | spinlock_t *ptl; |
771 | int ret = SWAP_AGAIN; | 770 | int ret = SWAP_AGAIN; |
772 | 771 | ||
773 | address = vma_address(page, vma); | ||
774 | if (address == -EFAULT) | ||
775 | goto out; | ||
776 | |||
777 | pte = page_check_address(page, mm, address, &ptl, 0); | 772 | pte = page_check_address(page, mm, address, &ptl, 0); |
778 | if (!pte) | 773 | if (!pte) |
779 | goto out; | 774 | goto out; |
@@ -1018,7 +1013,10 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) | |||
1018 | return ret; | 1013 | return ret; |
1019 | 1014 | ||
1020 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { | 1015 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
1021 | ret = try_to_unmap_one(page, vma, flags); | 1016 | unsigned long address = vma_address(page, vma); |
1017 | if (address == -EFAULT) | ||
1018 | continue; | ||
1019 | ret = try_to_unmap_one(page, vma, address, flags); | ||
1022 | if (ret != SWAP_AGAIN || !page_mapped(page)) | 1020 | if (ret != SWAP_AGAIN || !page_mapped(page)) |
1023 | break; | 1021 | break; |
1024 | } | 1022 | } |
@@ -1056,7 +1054,10 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags) | |||
1056 | 1054 | ||
1057 | spin_lock(&mapping->i_mmap_lock); | 1055 | spin_lock(&mapping->i_mmap_lock); |
1058 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { | 1056 | vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) { |
1059 | ret = try_to_unmap_one(page, vma, flags); | 1057 | unsigned long address = vma_address(page, vma); |
1058 | if (address == -EFAULT) | ||
1059 | continue; | ||
1060 | ret = try_to_unmap_one(page, vma, address, flags); | ||
1060 | if (ret != SWAP_AGAIN || !page_mapped(page)) | 1061 | if (ret != SWAP_AGAIN || !page_mapped(page)) |
1061 | goto out; | 1062 | goto out; |
1062 | } | 1063 | } |