diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-03-22 19:32:48 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-22 20:44:03 -0400 |
commit | 83813267c699ab11cc65a6d9d0f42db42f0862b3 (patch) | |
tree | 8a3257ae177ba0f1bb0aebd4a503357c26472908 | |
parent | 9e60109f125013b6c571f399a15a8b0fe1ffa4e6 (diff) |
mm: move anon_vma ref out from under CONFIG_foo
We need the anon_vma refcount unconditionally to simplify the anon_vma
lifetime rules.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/rmap.h | 40 | ||||
-rw-r--r-- | mm/rmap.c | 14 |
2 files changed, 10 insertions, 44 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b9b23ddca63a..fd56111a52c1 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -27,18 +27,15 @@ | |||
27 | struct anon_vma { | 27 | struct anon_vma { |
28 | struct anon_vma *root; /* Root of this anon_vma tree */ | 28 | struct anon_vma *root; /* Root of this anon_vma tree */ |
29 | spinlock_t lock; /* Serialize access to vma list */ | 29 | spinlock_t lock; /* Serialize access to vma list */ |
30 | #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) | ||
31 | |||
32 | /* | 30 | /* |
33 | * The external_refcount is taken by either KSM or page migration | 31 | * The refcount is taken on an anon_vma when there is no |
34 | * to take a reference to an anon_vma when there is no | ||
35 | * guarantee that the vma of page tables will exist for | 32 | * guarantee that the vma of page tables will exist for |
36 | * the duration of the operation. A caller that takes | 33 | * the duration of the operation. A caller that takes |
37 | * the reference is responsible for clearing up the | 34 | * the reference is responsible for clearing up the |
38 | * anon_vma if they are the last user on release | 35 | * anon_vma if they are the last user on release |
39 | */ | 36 | */ |
40 | atomic_t external_refcount; | 37 | atomic_t refcount; |
41 | #endif | 38 | |
42 | /* | 39 | /* |
43 | * NOTE: the LSB of the head.next is set by | 40 | * NOTE: the LSB of the head.next is set by |
44 | * mm_take_all_locks() _after_ taking the above lock. So the | 41 | * mm_take_all_locks() _after_ taking the above lock. So the |
@@ -71,41 +68,12 @@ struct anon_vma_chain { | |||
71 | }; | 68 | }; |
72 | 69 | ||
73 | #ifdef CONFIG_MMU | 70 | #ifdef CONFIG_MMU |
74 | #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) | ||
75 | static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma) | ||
76 | { | ||
77 | atomic_set(&anon_vma->external_refcount, 0); | ||
78 | } | ||
79 | |||
80 | static inline int anonvma_external_refcount(struct anon_vma *anon_vma) | ||
81 | { | ||
82 | return atomic_read(&anon_vma->external_refcount); | ||
83 | } | ||
84 | |||
85 | static inline void get_anon_vma(struct anon_vma *anon_vma) | 71 | static inline void get_anon_vma(struct anon_vma *anon_vma) |
86 | { | 72 | { |
87 | atomic_inc(&anon_vma->external_refcount); | 73 | atomic_inc(&anon_vma->refcount); |
88 | } | 74 | } |
89 | 75 | ||
90 | void put_anon_vma(struct anon_vma *); | 76 | void put_anon_vma(struct anon_vma *); |
91 | #else | ||
92 | static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma) | ||
93 | { | ||
94 | } | ||
95 | |||
96 | static inline int anonvma_external_refcount(struct anon_vma *anon_vma) | ||
97 | { | ||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static inline void get_anon_vma(struct anon_vma *anon_vma) | ||
102 | { | ||
103 | } | ||
104 | |||
105 | static inline void put_anon_vma(struct anon_vma *anon_vma) | ||
106 | { | ||
107 | } | ||
108 | #endif /* CONFIG_KSM */ | ||
109 | 77 | ||
110 | static inline struct anon_vma *page_anon_vma(struct page *page) | 78 | static inline struct anon_vma *page_anon_vma(struct page *page) |
111 | { | 79 | { |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -272,7 +272,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain) | |||
272 | list_del(&anon_vma_chain->same_anon_vma); | 272 | list_del(&anon_vma_chain->same_anon_vma); |
273 | 273 | ||
274 | /* We must garbage collect the anon_vma if it's empty */ | 274 | /* We must garbage collect the anon_vma if it's empty */ |
275 | empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma); | 275 | empty = list_empty(&anon_vma->head) && !atomic_read(&anon_vma->refcount); |
276 | anon_vma_unlock(anon_vma); | 276 | anon_vma_unlock(anon_vma); |
277 | 277 | ||
278 | if (empty) { | 278 | if (empty) { |
@@ -303,7 +303,7 @@ static void anon_vma_ctor(void *data) | |||
303 | struct anon_vma *anon_vma = data; | 303 | struct anon_vma *anon_vma = data; |
304 | 304 | ||
305 | spin_lock_init(&anon_vma->lock); | 305 | spin_lock_init(&anon_vma->lock); |
306 | anonvma_external_refcount_init(anon_vma); | 306 | atomic_set(&anon_vma->refcount, 0); |
307 | INIT_LIST_HEAD(&anon_vma->head); | 307 | INIT_LIST_HEAD(&anon_vma->head); |
308 | } | 308 | } |
309 | 309 | ||
@@ -1486,7 +1486,6 @@ int try_to_munlock(struct page *page) | |||
1486 | return try_to_unmap_file(page, TTU_MUNLOCK); | 1486 | return try_to_unmap_file(page, TTU_MUNLOCK); |
1487 | } | 1487 | } |
1488 | 1488 | ||
1489 | #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) | ||
1490 | /* | 1489 | /* |
1491 | * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root | 1490 | * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root |
1492 | * if necessary. Be careful to do all the tests under the lock. Once | 1491 | * if necessary. Be careful to do all the tests under the lock. Once |
@@ -1495,8 +1494,8 @@ int try_to_munlock(struct page *page) | |||
1495 | */ | 1494 | */ |
1496 | void put_anon_vma(struct anon_vma *anon_vma) | 1495 | void put_anon_vma(struct anon_vma *anon_vma) |
1497 | { | 1496 | { |
1498 | BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0); | 1497 | BUG_ON(atomic_read(&anon_vma->refcount) <= 0); |
1499 | if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) { | 1498 | if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->root->lock)) { |
1500 | struct anon_vma *root = anon_vma->root; | 1499 | struct anon_vma *root = anon_vma->root; |
1501 | int empty = list_empty(&anon_vma->head); | 1500 | int empty = list_empty(&anon_vma->head); |
1502 | int last_root_user = 0; | 1501 | int last_root_user = 0; |
@@ -1507,8 +1506,8 @@ void put_anon_vma(struct anon_vma *anon_vma) | |||
1507 | * the refcount on the root and check if we need to free it. | 1506 | * the refcount on the root and check if we need to free it. |
1508 | */ | 1507 | */ |
1509 | if (empty && anon_vma != root) { | 1508 | if (empty && anon_vma != root) { |
1510 | BUG_ON(atomic_read(&root->external_refcount) <= 0); | 1509 | BUG_ON(atomic_read(&root->refcount) <= 0); |
1511 | last_root_user = atomic_dec_and_test(&root->external_refcount); | 1510 | last_root_user = atomic_dec_and_test(&root->refcount); |
1512 | root_empty = list_empty(&root->head); | 1511 | root_empty = list_empty(&root->head); |
1513 | } | 1512 | } |
1514 | anon_vma_unlock(anon_vma); | 1513 | anon_vma_unlock(anon_vma); |
@@ -1520,7 +1519,6 @@ void put_anon_vma(struct anon_vma *anon_vma) | |||
1520 | } | 1519 | } |
1521 | } | 1520 | } |
1522 | } | 1521 | } |
1523 | #endif | ||
1524 | 1522 | ||
1525 | #ifdef CONFIG_MIGRATION | 1523 | #ifdef CONFIG_MIGRATION |
1526 | /* | 1524 | /* |