Diffstat (limited to 'mm/migrate.c')
 mm/migrate.c | 42 ++++++++++++++----------------------------
 1 file changed, 14 insertions(+), 28 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 246dcb973ae..a30ea5fcf9f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
 	if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
 		goto out;
 
-	/*
-	 * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
-	 * Failure is not an option here: we're now expected to remove every
-	 * migration pte, and will cause crashes otherwise. Normally this
-	 * is not an issue: mem_cgroup_prepare_migration bumped up the old
-	 * page_cgroup count for safety, that's now attached to the new page,
-	 * so this charge should just be another incrementation of the count,
-	 * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
-	 * there's been a force_empty, those reference counts may no longer
-	 * be reliable, and this charge can actually fail: oh well, we don't
-	 * make the situation any worse by proceeding as if it had succeeded.
-	 */
-	mem_cgroup_charge_migrate_fixup(new, mm, GFP_ATOMIC);
-
 	get_page(new);
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 	anon = PageAnon(page);
 	page->mapping = NULL;
 
-	if (!anon) /* This page was removed from radix-tree. */
-		mem_cgroup_uncharge_cache_page(page);
-
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
@@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	struct page *newpage = get_new_page(page, private, &result);
 	int rcu_locked = 0;
 	int charge = 0;
+	struct mem_cgroup *mem;
 
 	if (!newpage)
 		return -ENOMEM;
@@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		goto move_newpage;
 	}
 
-	charge = mem_cgroup_prepare_migration(page, newpage);
-	if (charge == -ENOMEM) {
-		rc = -ENOMEM;
-		goto move_newpage;
-	}
 	/* prepare cgroup just returns 0 or -ENOMEM */
-	BUG_ON(charge);
-
 	rc = -EAGAIN;
+
 	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
 	}
 
+	/* charge against new page */
+	charge = mem_cgroup_prepare_migration(page, &mem);
+	if (charge == -ENOMEM) {
+		rc = -ENOMEM;
+		goto unlock;
+	}
+	BUG_ON(charge);
+
 	if (PageWriteback(page)) {
 		if (!force)
-			goto unlock;
+			goto uncharge;
 		wait_on_page_writeback(page);
 	}
 	/*
@@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 rcu_unlock:
 	if (rcu_locked)
 		rcu_read_unlock();
-
+uncharge:
+	if (!charge)
+		mem_cgroup_end_migration(mem, page, newpage);
 unlock:
 	unlock_page(page);
 
@@ -709,8 +697,6 @@ unlock:
 	}
 
 move_newpage:
-	if (!charge)
-		mem_cgroup_end_migration(newpage);
 
 	/*
 	 * Move the new page to the LRU. If migration was not successful
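
For readers tracing the behavioral change: the patch stops re-charging the new page from remove_migration_pte() and instead brackets the migration in unmap_and_move() with a prepare/end pair, taking the charge against the new page under the page lock and committing or rolling it back once the outcome is known. Below is a minimal, illustrative sketch of that flow, condensed from the hunks above; it is not the full function. The rcu handling, try_to_unmap() step, writeback/force logic, and the rcu_unlock/uncharge label structure are elided, and move_to_new_page() is mm/migrate.c's own static helper for the actual copy.

/*
 * Sketch only: condensed from the patched unmap_and_move() above.
 * The real code also unmaps the page, waits for writeback, and
 * handles rcu locking for anon pages.
 */
#include <linux/memcontrol.h>
#include <linux/pagemap.h>

static int unmap_and_move_sketch(struct page *page, struct page *newpage)
{
	struct mem_cgroup *mem;
	int charge;
	int rc = -EAGAIN;

	if (!trylock_page(page))
		return rc;		/* caller may retry, or force lock_page() */

	/* charge against new page; prepare returns only 0 or -ENOMEM */
	charge = mem_cgroup_prepare_migration(page, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}

	rc = move_to_new_page(newpage, page);	/* the file's existing helper */

	/* commit the charge to newpage on success, roll back otherwise */
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage);
unlock:
	unlock_page(page);
	return rc;
}

The key design point visible in the diff is ordering: the charge is prepared after the page lock is taken (so failure can unwind through the existing unlock path) and ended before the lock is dropped, which is why the writeback bail-out retargets from "goto unlock" to the new "uncharge" label.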