aboutsummaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
authorJeremy Erickson <jerickso@cs.unc.edu>2014-04-11 13:24:45 -0400
committerJeremy Erickson <jerickso@cs.unc.edu>2014-04-11 13:24:45 -0400
commit438145c7ef5c9445f25bb8fc4d52e2c9d11fdc7c (patch)
tree76941991e36f4a32bf1be0db3854959053f24619 /mm/migrate.c
parent9ddd1b8ad8abd321964b8add5581910de6d67c2a (diff)
Update from 2.6.36 to 2.6.36.4 (branch: wip-dissipation-jerickso)
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  48
1 file changed, 19 insertions(+), 29 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 38e7cad782f4..2cfa9bf1f0d4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -553,7 +553,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
 	int remap_swapcache = 1;
-	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
 	struct anon_vma *anon_vma = NULL;
@@ -605,20 +604,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
-	 * This rcu_read_lock() delays freeing anon_vma pointer until the end
+	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
 	 */
 	if (PageAnon(page)) {
-		rcu_read_lock();
-		rcu_locked = 1;
-
-		/* Determine how to safely use anon_vma */
-		if (!page_mapped(page)) {
-			if (!PageSwapCache(page))
-				goto rcu_unlock;
-
+		/*
+		 * Only page_lock_anon_vma() understands the subtleties of
+		 * getting a hold on an anon_vma from outside one of its mms.
+		 */
+		anon_vma = page_lock_anon_vma(page);
+		if (anon_vma) {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			get_anon_vma(anon_vma);
+			page_unlock_anon_vma(anon_vma);
+		} else if (PageSwapCache(page)) {
 			/*
 			 * We cannot be sure that the anon_vma of an unmapped
 			 * swapcache page is safe to use because we don't
@@ -633,13 +638,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 			 */
 			remap_swapcache = 0;
 		} else {
-			/*
-			 * Take a reference count on the anon_vma if the
-			 * page is mapped so that it is guaranteed to
-			 * exist when the page is remapped later
-			 */
-			anon_vma = page_anon_vma(page);
-			get_anon_vma(anon_vma);
+			goto uncharge;
 		}
 	}
 
@@ -656,16 +655,10 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		if (!PageAnon(page) && page_has_private(page)) {
-			/*
-			 * Go direct to try_to_free_buffers() here because
-			 * a) that's what try_to_release_page() would do anyway
-			 * b) we may be under rcu_read_lock() here, so we can't
-			 * use GFP_KERNEL which is what try_to_release_page()
-			 * needs to be effective.
-			 */
+		VM_BUG_ON(PageAnon(page));
+		if (page_has_private(page)) {
 			try_to_free_buffers(page);
-			goto rcu_unlock;
+			goto uncharge;
 		}
 		goto skip_unmap;
 	}
@@ -679,14 +672,11 @@ skip_unmap:
 
 	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
-rcu_unlock:
 
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		drop_anon_vma(anon_vma);
 
-	if (rcu_locked)
-		rcu_read_unlock();
 uncharge:
 	if (!charge)
 		mem_cgroup_end_migration(mem, page, newpage);