author     Nick Piggin <npiggin@suse.de>	2006-03-22 03:08:03 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:57 -0500
commit     7c8ee9a86340db686cd4314e9944dc9b6111bda9 (patch)
tree       80638e1658556b4fd7c0b92d571aaac854245bd3 /include/linux/mm.h
parent     f205b2fe62d321403525065a4cb31b6bff1bbe53 (diff)
[PATCH] mm: simplify vmscan vs release refcounting
The VM has an interesting race where a page refcount can drop to zero, but it is still on the LRU lists for a short time.  This was solved by testing a 0->1 refcount transition when picking up pages from the LRU, and dropping the refcount in that case.

Instead, use atomic_add_unless to ensure we never pick up a 0 refcount page from the LRU, thus a 0 refcount page will never have its refcount elevated until it is allocated again.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
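For readers following along without the file open, below is a minimal userspace C11 sketch of the two helpers the patch ends up with. It assumes the biased _count convention mm.h uses in this era (_count == -1 means the page has no users) and models atomic_add_unless() with a compare-and-swap loop; it is an illustration only, not the kernel implementation.

	/* Userspace sketch, NOT kernel code.  _count is biased by one,
	 * so -1 means the page has zero logical users. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <assert.h>

	struct page { atomic_int _count; };

	/* Drop a ref; true if the logical refcount fell to zero. */
	static bool put_page_testzero(struct page *page)
	{
		assert(atomic_load(&page->_count) != -1);	/* stand-in for BUG_ON() */
		return atomic_fetch_sub(&page->_count, 1) == 0;	/* old 0 -> new -1: no users left */
	}

	/* Take a ref only if the logical refcount is not already zero:
	 * the atomic_add_unless(&_count, 1, -1) idea from this patch. */
	static bool get_page_unless_zero(struct page *page)
	{
		int old = atomic_load(&page->_count);

		while (old != -1) {		/* -1 means a logical refcount of zero */
			if (atomic_compare_exchange_weak(&page->_count, &old, old + 1))
				return true;	/* reference taken */
		}
		return false;			/* page is already free; leave it alone */
	}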
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb6..b12d5c76420d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,17 +301,20 @@ struct page {
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == -1);
+	return atomic_add_negative(-1, &page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_add_unless(&page->_count, 1, -1);
+}
 
 #define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
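As a usage note, here is a small self-contained demo of the behaviour the commit message describes: a scanner that simply skips any page whose refcount has already fallen to zero. The helper bodies from the sketch above are repeated so the program compiles on its own; the lru[] array and the printed messages are invented for illustration and have no counterpart in the kernel's vmscan code.

	/* Toy demonstration: never take a reference to a zero-refcount page.
	 * Userspace model only; the "LRU" is just a fixed array. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct page { atomic_int _count; };	/* biased: -1 means no users */

	static bool get_page_unless_zero(struct page *page)
	{
		int old = atomic_load(&page->_count);

		while (old != -1) {
			if (atomic_compare_exchange_weak(&page->_count, &old, old + 1))
				return true;
		}
		return false;
	}

	static bool put_page_testzero(struct page *page)
	{
		return atomic_fetch_sub(&page->_count, 1) == 0;
	}

	int main(void)
	{
		/* Two pages "on the LRU": index 0 still has a user, index 1 was
		 * already released but has not been taken off the list yet. */
		struct page lru[2] = { { ._count = 0 }, { ._count = -1 } };

		for (int i = 0; i < 2; i++) {
			if (!get_page_unless_zero(&lru[i])) {
				printf("page %d: refcount already zero, skipped\n", i);
				continue;
			}
			printf("page %d: reference taken, would scan it here\n", i);
			if (put_page_testzero(&lru[i]))
				printf("page %d: we dropped the last reference\n", i);
		}
		return 0;
	}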