author		Nick Piggin <npiggin@suse.de>	2006-03-22 03:08:03 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:57 -0500
commit		7c8ee9a86340db686cd4314e9944dc9b6111bda9 (patch)
tree		80638e1658556b4fd7c0b92d571aaac854245bd3
parent		f205b2fe62d321403525065a4cb31b6bff1bbe53 (diff)
[PATCH] mm: simplify vmscan vs release refcounting
The VM has an interesting race where a page refcount can drop to zero,
but it is still on the LRU lists for a short time.  This was solved by
testing a 0->1 refcount transition when picking up pages from the LRU,
and dropping the refcount in that case.

Instead, use atomic_add_unless to ensure we never pick up a 0 refcount
page from the LRU; thus a 0 refcount page will never have its refcount
elevated until it is allocated again.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
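The primitive this patch leans on, atomic_add_unless(v, a, u), atomically
adds a to *v unless *v already equals u, and reports whether the add took
place.  Purely as an illustration of those semantics (this is a user-space
sketch in C11 atomics, not the kernel's arch-specific implementation):

#include <stdatomic.h>

/* Illustrative analogue of the kernel's atomic_add_unless(): add `a`
 * to `*v` unless `*v` equals `unless`.  Returns nonzero iff it added. */
static int add_unless(atomic_int *v, int a, int unless)
{
	int old = atomic_load(v);

	while (old != unless) {
		/* On failure the CAS reloads `old` with the current
		 * value of `*v`, so we simply retest and retry. */
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return 1;
	}
	return 0;
}

Note that page->_count is biased: set_page_count(p, v) in the mm.h hunk
below stores v - 1, so a logical refcount of zero is stored as -1.  That
is why the new get_page_unless_zero() passes -1 as the "unless" value.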
-rw-r--r--	include/linux/mm.h	19
-rw-r--r--	mm/vmscan.c	25
2 files changed, 22 insertions, 22 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb6..b12d5c76420d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,17 +301,20 @@ struct page {
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == -1);
+	return atomic_add_negative(-1, &page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_add_unless(&page->_count, 1, -1);
+}
 
 #define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e477b1a4838..e21bab4deda6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,29 +1083,26 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	int scan = 0;
 
 	while (scan++ < nr_to_scan && !list_empty(src)) {
+		struct list_head *target;
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		BUG_ON(!PageLRU(page));
 
 		list_del(&page->lru);
-		if (unlikely(get_page_testone(page))) {
+		target = src;
+		if (likely(get_page_unless_zero(page))) {
 			/*
-			 * It is being freed elsewhere
+			 * Be careful not to clear PageLRU until after we're
+			 * sure the page is not being freed elsewhere -- the
+			 * page release code relies on it.
 			 */
-			__put_page(page);
-			list_add(&page->lru, src);
-			continue;
-		}
+			ClearPageLRU(page);
+			target = dst;
+			nr_taken++;
+		} /* else it is being freed elsewhere */
 
-		/*
-		 * Be careful not to clear PageLRU until after we're sure
-		 * the page is not being freed elsewhere -- the page release
-		 * code relies on it.
-		 */
-		ClearPageLRU(page);
-		list_add(&page->lru, dst);
-		nr_taken++;
+		list_add(&page->lru, target);
 	}
 
 	*scanned = scan;