 include/linux/rmap.h | 23 +++++++++++++++++++++++
 mm/migrate.c         | 12 ++++++++++++
 mm/rmap.c            | 10 +++++-----
 3 files changed, 40 insertions(+), 5 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d25bd224d370..567d43f29a10 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -29,6 +29,9 @@ struct anon_vma {
 #ifdef CONFIG_KSM
	atomic_t ksm_refcount;
 #endif
+#ifdef CONFIG_MIGRATION
+	atomic_t migrate_refcount;
+#endif
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -81,6 +84,26 @@ static inline int ksm_refcount(struct anon_vma *anon_vma)
	return 0;
 }
 #endif /* CONFIG_KSM */
+#ifdef CONFIG_MIGRATION
+static inline void migrate_refcount_init(struct anon_vma *anon_vma)
+{
+	atomic_set(&anon_vma->migrate_refcount, 0);
+}
+
+static inline int migrate_refcount(struct anon_vma *anon_vma)
+{
+	return atomic_read(&anon_vma->migrate_refcount);
+}
+#else
+static inline void migrate_refcount_init(struct anon_vma *anon_vma)
+{
+}
+
+static inline int migrate_refcount(struct anon_vma *anon_vma)
+{
+	return 0;
+}
+#endif /* CONFIG_MIGRATION */

 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
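
The new rmap.h helpers follow the existing ksm_refcount pattern: with CONFIG_MIGRATION disabled they compile to no-ops, so callers such as anon_vma_unlink() below need no #ifdef of their own. A minimal user-space sketch of the same stub pattern, assuming a hypothetical FEATURE_MIGRATION switch and an invented fake_anon_vma type (neither is from the patch):

#include <stdatomic.h>

/* Simplified stand-in for struct anon_vma; FEATURE_MIGRATION is a
 * hypothetical analogue of CONFIG_MIGRATION. */
struct fake_anon_vma {
#ifdef FEATURE_MIGRATION
	atomic_int migrate_refcount;
#endif
	int other_fields;		/* rest of the structure elided */
};

#ifdef FEATURE_MIGRATION
static inline void fake_refcount_init(struct fake_anon_vma *av)
{
	atomic_init(&av->migrate_refcount, 0);
}

static inline int fake_refcount(struct fake_anon_vma *av)
{
	return atomic_load(&av->migrate_refcount);
}
#else
/* Feature compiled out: the init is a no-op and the read constant-folds
 * to "no references held", so call sites stay unconditional. */
static inline void fake_refcount_init(struct fake_anon_vma *av) { }
static inline int fake_refcount(struct fake_anon_vma *av) { return 0; }
#endif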
diff --git a/mm/migrate.c b/mm/migrate.c
index 5938db54e1d7..b768a1d4fa43 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -543,6 +543,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
	int rcu_locked = 0;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
+	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;
@@ -599,6 +600,8 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
	if (PageAnon(page)) {
		rcu_read_lock();
		rcu_locked = 1;
+		anon_vma = page_anon_vma(page);
+		atomic_inc(&anon_vma->migrate_refcount);
	}

	/*
@@ -638,6 +641,15 @@ skip_unmap:
	if (rc)
		remove_migration_ptes(page, page);
 rcu_unlock:
+
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
+		int empty = list_empty(&anon_vma->head);
+		spin_unlock(&anon_vma->lock);
+		if (empty)
+			anon_vma_free(anon_vma);
+	}
+
	if (rcu_locked)
		rcu_read_unlock();
 uncharge:
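
The drop path in the rcu_unlock: hunk leans on atomic_dec_and_lock(): the spinlock is taken only when the decrement may reach zero, keeping the common case lock-free, while the final list_empty() check and the free happen under the lock so they cannot race anon_vma_unlink(). A user-space sketch of that protocol, assuming a pthreads mutex in place of anon_vma->lock and a boolean in place of list_empty(); struct pinned_obj, dec_and_lock(), and drop_migrate_ref() are invented names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Simplified object: a lock, the migration refcount, and a flag that
 * stands in for list_empty(&anon_vma->head). */
struct pinned_obj {
	pthread_mutex_t lock;
	atomic_int migrate_refcount;
	bool list_is_empty;
};

/* User-space analogue of the kernel's atomic_dec_and_lock(): returns
 * true with the lock held iff the decrement took the count to zero. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	int old = atomic_load(cnt);

	/* Fast path: clearly not the last reference, so skip the lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return false;
	}

	/* Slow path: possibly the last reference; decrement under the
	 * lock so the zero check and any teardown are serialized. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) == 1)
		return true;		/* count hit zero, lock still held */
	pthread_mutex_unlock(lock);
	return false;
}

/* Mirror of the rcu_unlock: drop path in unmap_and_move() above. */
static void drop_migrate_ref(struct pinned_obj *obj)
{
	if (dec_and_lock(&obj->migrate_refcount, &obj->lock)) {
		bool empty = obj->list_is_empty;

		pthread_mutex_unlock(&obj->lock);
		if (empty)
			free(obj);	/* stands in for anon_vma_free() */
	}
}

The design point mirrored here is the same as in the patch: teardown is decided under the lock, but only the thread dropping the last reference ever pays for acquiring it.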
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,8 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
+					!migrate_refcount(anon_vma);
	spin_unlock(&anon_vma->lock);

	if (empty)
@@ -275,6 +276,7 @@ static void anon_vma_ctor(void *data)

	spin_lock_init(&anon_vma->lock);
	ksm_refcount_init(anon_vma);
+	migrate_refcount_init(anon_vma);
	INIT_LIST_HEAD(&anon_vma->head);
 }

@@ -1355,10 +1357,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
	 * because that depends on page_mapped(); but not all its usages
-	 * are holding mmap_sem, which also gave the necessary guarantee
-	 * (that this anon_vma's slab has not already been destroyed).
-	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
-	 * is risky, and currently limits the usefulness of rmap_walk().
+	 * are holding mmap_sem. Users without mmap_sem are required to
+	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
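
The rewritten comment states the rule that the migrate.c hunks implement: an rmap_walk() caller that does not hold mmap_sem must pin the anon_vma with a reference before walking it. Continuing the sketch above (reusing struct pinned_obj and drop_migrate_ref(); the walk body itself is elided):

/* Pin, walk, unpin: the caller-side protocol the new comment requires. */
static void walk_without_mmap_sem(struct pinned_obj *obj)
{
	/* Pin: mirrors atomic_inc(&anon_vma->migrate_refcount), which
	 * unmap_and_move() performs under rcu_read_lock(). */
	atomic_fetch_add(&obj->migrate_refcount, 1);

	/* ... walk the list; the pin guarantees obj outlives the walk
	 * even if its last entry is unlinked concurrently ... */

	/* Unpin via the same path as migration; frees obj if it is now
	 * both unreferenced and empty. */
	drop_migrate_ref(obj);
}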