Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  |  72
1 file changed, 59 insertions(+), 13 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index d3f3f7f81075..09e2471afa0f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -40,7 +40,8 @@
 
 /*
  * migrate_prep() needs to be called before we start compiling a list of pages
- * to be migrated using isolate_lru_page().
+ * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
+ * undesirable, use migrate_prep_local()
  */
 int migrate_prep(void)
 {
@@ -55,26 +56,29 @@ int migrate_prep(void)
 	return 0;
 }
 
+/* Do the necessary work of migrate_prep but not if it involves other CPUs */
+int migrate_prep_local(void)
+{
+	lru_add_drain();
+
+	return 0;
+}
+
 /*
  * Add isolated pages on the list back to the LRU under page lock
  * to avoid leaking evictable pages back onto unevictable list.
- *
- * returns the number of pages put back.
  */
-int putback_lru_pages(struct list_head *l)
+void putback_lru_pages(struct list_head *l)
 {
 	struct page *page;
 	struct page *page2;
-	int count = 0;
 
 	list_for_each_entry_safe(page, page2, l, lru) {
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		putback_lru_page(page);
-		count++;
 	}
-	return count;
 }
 
 /*
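migrate_prep() schedules lru_add_drain() work on every CPU so that pages still
sitting in per-CPU pagevecs reach the LRU lists before isolate_lru_page()
looks for them; the new migrate_prep_local() drains only the calling CPU.
A minimal sketch of how a caller might choose between the two
(isolate_candidates() and its arguments are hypothetical, for illustration
only):

	/* Hypothetical caller that builds a list of pages to migrate. */
	static int isolate_candidates(struct list_head *pagelist,
					bool drain_other_cpus)
	{
		/*
		 * Flush per-CPU LRU pagevecs so isolate_lru_page() can
		 * find candidate pages on the LRU lists. When scheduling
		 * work on other CPUs is undesirable, only drain locally.
		 */
		if (drain_other_cpus)
			migrate_prep();
		else
			migrate_prep_local();

		/* ... isolate_lru_page() each candidate onto pagelist ... */
		return 0;
	}

putback_lru_pages() also becomes void here: the count of pages put back was
evidently unused by callers, so the bookkeeping is dropped.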
@@ -490,7 +494,8 @@ static int fallback_migrate_page(struct address_space *mapping,
  *  < 0 - error code
  * == 0 - success
  */
-static int move_to_new_page(struct page *newpage, struct page *page)
+static int move_to_new_page(struct page *newpage, struct page *page,
+						int remap_swapcache)
 {
 	struct address_space *mapping;
 	int rc;
@@ -525,10 +530,12 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
-		remove_migration_ptes(page, newpage);
-	else
+	if (rc) {
 		newpage->mapping = NULL;
+	} else {
+		if (remap_swapcache)
+			remove_migration_ptes(page, newpage);
+	}
 
 	unlock_page(newpage);
 
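move_to_new_page() now takes a remap_swapcache flag from its caller. On
failure, newpage->mapping is cleared so the never-published new page can be
released; on success, the migration PTEs are only rewritten to point at
newpage when the caller has said remapping is safe. A condensed view of the
resulting caller contract (the comments are an interpretation of the hunks
above, not kernel documentation):

	rc = move_to_new_page(newpage, page, remap_swapcache);
	/*
	 * rc == 0, remap_swapcache set:   PTEs now map newpage.
	 * rc == 0, remap_swapcache clear: data migrated; the page is
	 *         found again through the swap cache at fault time.
	 * rc != 0: newpage->mapping was reset; the caller may retry.
	 */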
@@ -545,9 +552,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int remap_swapcache = 1;
 	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
+	struct anon_vma *anon_vma = NULL;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -604,6 +613,34 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	if (PageAnon(page)) {
 		rcu_read_lock();
 		rcu_locked = 1;
+
+		/* Determine how to safely use anon_vma */
+		if (!page_mapped(page)) {
+			if (!PageSwapCache(page))
+				goto rcu_unlock;
+
+			/*
+			 * We cannot be sure that the anon_vma of an unmapped
+			 * swapcache page is safe to use because we don't
+			 * know in advance if the VMA that this page belonged
+			 * to still exists. If the VMA and others sharing the
+			 * data have been freed, then the anon_vma could
+			 * already be invalid.
+			 *
+			 * To avoid this possibility, swapcache pages get
+			 * migrated but are not remapped when migration
+			 * completes
+			 */
+			remap_swapcache = 0;
+		} else {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			anon_vma = page_anon_vma(page);
+			atomic_inc(&anon_vma->external_refcount);
+		}
 	}
 
 	/*
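For an anonymous page this hunk distinguishes three cases, all decided under
rcu_read_lock(): a mapped page has its anon_vma pinned via external_refcount
so the anon_vma is guaranteed to still exist when the page is remapped later;
an unmapped page that is still in swapcache is migrated but with
remap_swapcache cleared, since the owning VMAs may already be gone and the
anon_vma invalid; an unmapped page that is not in swapcache has nothing to
migrate safely and bails out through the rcu_unlock label.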
@@ -638,11 +675,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page);
+		rc = move_to_new_page(newpage, page, remap_swapcache);
 
-	if (rc)
+	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
 rcu_unlock:
+
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
+		int empty = list_empty(&anon_vma->head);
+		spin_unlock(&anon_vma->lock);
+		if (empty)
+			anon_vma_free(anon_vma);
+	}
+
 	if (rcu_locked)
 		rcu_read_unlock();
 uncharge:
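The drop side above pairs with the atomic_inc() taken earlier. The
atomic_dec_and_lock() idiom acquires anon_vma->lock only when the count
reaches zero, so the emptiness check and the free are serialized against
anyone still linking or unlinking VMAs on this anon_vma. The pattern in
isolation (a sketch assuming the anon_vma->external_refcount field used by
this patch; error handling omitted):

	/* Take: under rcu_read_lock(), while the page is still mapped. */
	struct anon_vma *anon_vma = page_anon_vma(page);
	atomic_inc(&anon_vma->external_refcount);

	/* ... migration runs; the anon_vma cannot be freed meanwhile ... */

	/* Drop: the lock is taken only by the decrement that hits zero. */
	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);	/* last user frees it */
	}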