diff options
author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-02-28 23:13:49 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-03-01 17:53:39 -0500 |
commit | 34bbd704051c9d053d69e90569a3a2365f4c7b50 (patch) | |
tree | f5fb6f3941cd09be0fb92ae80f9759ad64fca709 /mm/rmap.c | |
parent | 48dba8ab9b93c3b6b57946bd45ae013402b0b054 (diff) |
[PATCH] adapt page_lock_anon_vma() to PREEMPT_RCU
page_lock_anon_vma() uses spin_lock() to block RCU. This doesn't work with
PREEMPT_RCU; we have to do rcu_read_lock() explicitly. Otherwise, it is
theoretically possible that slab returns anon_vma's memory to the system
before we do spin_unlock(&anon_vma->lock).
[ Hugh points out that this only matters for PREEMPT_RCU, which isn't merged
yet, and may never be. Regardless, this patch is conceptually the
right thing to do, even if it doesn't matter at this point. - Linus ]
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 17 |
1 file changed, 13 insertions, 4 deletions
@@ -183,7 +183,7 @@ void __init anon_vma_init(void) | |||
183 | */ | 183 | */ |
184 | static struct anon_vma *page_lock_anon_vma(struct page *page) | 184 | static struct anon_vma *page_lock_anon_vma(struct page *page) |
185 | { | 185 | { |
186 | struct anon_vma *anon_vma = NULL; | 186 | struct anon_vma *anon_vma; |
187 | unsigned long anon_mapping; | 187 | unsigned long anon_mapping; |
188 | 188 | ||
189 | rcu_read_lock(); | 189 | rcu_read_lock(); |
@@ -195,9 +195,16 @@ static struct anon_vma *page_lock_anon_vma(struct page *page) | |||
195 | 195 | ||
196 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); | 196 | anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); |
197 | spin_lock(&anon_vma->lock); | 197 | spin_lock(&anon_vma->lock); |
198 | return anon_vma; | ||
198 | out: | 199 | out: |
199 | rcu_read_unlock(); | 200 | rcu_read_unlock(); |
200 | return anon_vma; | 201 | return NULL; |
202 | } | ||
203 | |||
204 | static void page_unlock_anon_vma(struct anon_vma *anon_vma) | ||
205 | { | ||
206 | spin_unlock(&anon_vma->lock); | ||
207 | rcu_read_unlock(); | ||
201 | } | 208 | } |
202 | 209 | ||
203 | /* | 210 | /* |
@@ -333,7 +340,8 @@ static int page_referenced_anon(struct page *page) | |||
333 | if (!mapcount) | 340 | if (!mapcount) |
334 | break; | 341 | break; |
335 | } | 342 | } |
336 | spin_unlock(&anon_vma->lock); | 343 | |
344 | page_unlock_anon_vma(anon_vma); | ||
337 | return referenced; | 345 | return referenced; |
338 | } | 346 | } |
339 | 347 | ||
@@ -802,7 +810,8 @@ static int try_to_unmap_anon(struct page *page, int migration) | |||
802 | if (ret == SWAP_FAIL || !page_mapped(page)) | 810 | if (ret == SWAP_FAIL || !page_mapped(page)) |
803 | break; | 811 | break; |
804 | } | 812 | } |
805 | spin_unlock(&anon_vma->lock); | 813 | |
814 | page_unlock_anon_vma(anon_vma); | ||
806 | return ret; | 815 | return ret; |
807 | } | 816 | } |
808 | 817 | ||