aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNamhyung Kim <namhyung@gmail.com>2010-10-26 17:22:01 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-10-26 19:52:09 -0400
commitea4525b6008fb29553306ec6719f8e6930ac9499 (patch)
tree1d168e0a05a5f2fc962b8a6e991a21b704ad6e0b
parent1b36ba815bd91f17e31277a44dd5c6b6a5a8d97e (diff)
rmap: annotate lock context change on page_[un]lock_anon_vma()
The page_lock_anon_vma() conditionally grabs RCU and anon_vma lock but
page_unlock_anon_vma() releases them unconditionally.  This leads sparse
to complain about context imbalance.  Annotate them.

Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/rmap.h15
-rw-r--r--mm/rmap.c4
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 5c98df68a953..07ea89c16761 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -230,7 +230,20 @@ int try_to_munlock(struct page *);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page);
+struct anon_vma *__page_lock_anon_vma(struct page *page);
+
+static inline struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+	struct anon_vma *anon_vma;
+
+	__cond_lock(RCU, anon_vma = __page_lock_anon_vma(page));
+
+	/* (void) is needed to make gcc happy */
+	(void) __cond_lock(&anon_vma->root->lock, anon_vma);
+
+	return anon_vma;
+}
+
 void page_unlock_anon_vma(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index f5ad996a4a8f..0995a8f68866 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -314,7 +314,7 @@ void __init anon_vma_init(void)
  * Getting a lock on a stable anon_vma from a page off the LRU is
  * tricky: page_lock_anon_vma rely on RCU to guard against the races.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *__page_lock_anon_vma(struct page *page)
 {
 	struct anon_vma *anon_vma, *root_anon_vma;
 	unsigned long anon_mapping;
@@ -348,6 +348,8 @@ out:
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
+	__releases(&anon_vma->root->lock)
+	__releases(RCU)
 {
 	anon_vma_unlock(anon_vma);
 	rcu_read_unlock();