about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/migrate.c12
-rw-r--r--mm/rmap.c10
2 files changed, 17 insertions, 5 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 5938db54e1d..b768a1d4fa4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -543,6 +543,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
543 int rcu_locked = 0; 543 int rcu_locked = 0;
544 int charge = 0; 544 int charge = 0;
545 struct mem_cgroup *mem = NULL; 545 struct mem_cgroup *mem = NULL;
546 struct anon_vma *anon_vma = NULL;
546 547
547 if (!newpage) 548 if (!newpage)
548 return -ENOMEM; 549 return -ENOMEM;
@@ -599,6 +600,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
599 if (PageAnon(page)) { 600 if (PageAnon(page)) {
600 rcu_read_lock(); 601 rcu_read_lock();
601 rcu_locked = 1; 602 rcu_locked = 1;
603 anon_vma = page_anon_vma(page);
604 atomic_inc(&anon_vma->migrate_refcount);
602 } 605 }
603 606
604 /* 607 /*
@@ -638,6 +641,15 @@ skip_unmap:
638 if (rc) 641 if (rc)
639 remove_migration_ptes(page, page); 642 remove_migration_ptes(page, page);
640rcu_unlock: 643rcu_unlock:
644
645 /* Drop an anon_vma reference if we took one */
646 if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
647 int empty = list_empty(&anon_vma->head);
648 spin_unlock(&anon_vma->lock);
649 if (empty)
650 anon_vma_free(anon_vma);
651 }
652
641 if (rcu_locked) 653 if (rcu_locked)
642 rcu_read_unlock(); 654 rcu_read_unlock();
643uncharge: 655uncharge:
diff --git a/mm/rmap.c b/mm/rmap.c
index 0feeef860a8..f522cb00864 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,8 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
250 list_del(&anon_vma_chain->same_anon_vma); 250 list_del(&anon_vma_chain->same_anon_vma);
251 251
252 /* We must garbage collect the anon_vma if it's empty */ 252 /* We must garbage collect the anon_vma if it's empty */
253 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); 253 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
254 !migrate_refcount(anon_vma);
254 spin_unlock(&anon_vma->lock); 255 spin_unlock(&anon_vma->lock);
255 256
256 if (empty) 257 if (empty)
@@ -275,6 +276,7 @@ static void anon_vma_ctor(void *data)
275 276
276 spin_lock_init(&anon_vma->lock); 277 spin_lock_init(&anon_vma->lock);
277 ksm_refcount_init(anon_vma); 278 ksm_refcount_init(anon_vma);
279 migrate_refcount_init(anon_vma);
278 INIT_LIST_HEAD(&anon_vma->head); 280 INIT_LIST_HEAD(&anon_vma->head);
279} 281}
280 282
@@ -1355,10 +1357,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1355 /* 1357 /*
1356 * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1358 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
1357 * because that depends on page_mapped(); but not all its usages 1359 * because that depends on page_mapped(); but not all its usages
1358 * are holding mmap_sem, which also gave the necessary guarantee 1360 * are holding mmap_sem. Users without mmap_sem are required to
1359 * (that this anon_vma's slab has not already been destroyed). 1361 * take a reference count to prevent the anon_vma disappearing
1360 * This needs to be reviewed later: avoiding page_lock_anon_vma()
1361 * is risky, and currently limits the usefulness of rmap_walk().
1362 */ 1362 */
1363 anon_vma = page_anon_vma(page); 1363 anon_vma = page_anon_vma(page);
1364 if (!anon_vma) 1364 if (!anon_vma)