author		Nick Piggin <npiggin@suse.de>		2006-03-22 03:08:03 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:53:57 -0500
commit		8dc04efbfb3c08a08fb7a3b97348d5d561b26ae2 (patch)
tree		a48ce376d077745fdab23a33e5be1d48c979c3ae /include/linux/mm.h
parent		7c8ee9a86340db686cd4314e9944dc9b6111bda9 (diff)
[PATCH] mm: de-skew page refcounting
atomic_add_unless (atomic_inc_not_zero) no longer requires an offset refcount
to function correctly.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h | 19 +++++--------------
1 file changed, 5 insertions(+), 14 deletions(-)
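The whole patch is one idea: page->_count was previously stored skewed by -1
(a free page read -1, a page with one user read 0) so that
atomic_add_negative() and atomic_inc_and_test() could detect the interesting
transitions, as the comment removed below explains. Since
atomic_add_unless()/atomic_inc_not_zero() works fine against a zero sentinel,
the skew can go away and _count can hold the true reference count. A minimal
user-space model of the before/after arithmetic, using C11 atomics rather
than the kernel's atomic_t (variable names here are invented for
illustration):

#include <assert.h>
#include <stdatomic.h>

int main(void)
{
	atomic_int old_count;	/* old scheme: stores refcount - 1 */
	atomic_int new_count;	/* new scheme: stores refcount     */

	/* One reference held. */
	atomic_store(&old_count, 0);
	atomic_store(&new_count, 1);

	/* page_count() had to add the skew back; now it is a plain read. */
	assert(atomic_load(&old_count) + 1 == atomic_load(&new_count));

	/* Drop the last reference.  Old scheme: free when the result goes
	 * negative (0 -> -1).  New scheme: free when it hits zero (1 -> 0). */
	int old_is_free = (atomic_fetch_sub(&old_count, 1) - 1 < 0);
	int new_is_free = (atomic_fetch_sub(&new_count, 1) - 1 == 0);
	assert(old_is_free && new_is_free);
	return 0;
}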
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b12d5c76420d..9bbddf228cd9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,15 +286,6 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */
 
 /*
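The deleted comment is the entire justification for the old skew: with a free
page at -1, atomic_add_negative(-1, ...) flags the 0 -> -1 "became free"
transition. A rough user-space model of that primitive's semantics
(atomic_fetch_add stands in for the kernel's arch-specific implementation;
the helper name is invented):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the value after adding i is negative -- under the
 * old skewed scheme, true meant "this put freed the page" (0 -> -1). */
static bool model_atomic_add_negative(int i, atomic_int *v)
{
	return atomic_fetch_add(v, i) + i < 0;
}

int main(void)
{
	atomic_int count;
	atomic_store(&count, 0);	/* one reference, skewed storage */
	return model_atomic_add_negative(-1, &count) ? 0 : 1;
}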
@@ -303,8 +294,8 @@ struct page {
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == -1);
-	return atomic_add_negative(-1, &page->_count);
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
 }
 
 /*
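With the skew gone, put_page_testzero() becomes a plain decrement-and-test:
it returns true exactly for the 1 -> 0 transition, so only one CPU ever sees
"last reference dropped" and proceeds to release the page. A user-space model
of the new helper (assert() stands in for BUG_ON; the function names are
invented):

#include <assert.h>
#include <stdatomic.h>

/* True exactly when this call dropped the count from 1 to 0. */
static int model_put_page_testzero(atomic_int *count)
{
	assert(atomic_load(count) != 0);	/* the patch's new BUG_ON */
	return atomic_fetch_sub(count, 1) - 1 == 0;
}

int main(void)
{
	atomic_int count;
	atomic_store(&count, 2);
	(void)model_put_page_testzero(&count);	/* 2 -> 1: not the last ref */
	if (model_put_page_testzero(&count))	/* 1 -> 0: last ref dropped */
		;	/* only here would a real caller release the page */
	return 0;
}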
@@ -313,10 +304,10 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
-	return atomic_add_unless(&page->_count, 1, -1);
+	return atomic_inc_not_zero(&page->_count);
 }
 
-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
+#define set_page_count(p,v)	atomic_set(&(p)->_count, (v))
 #define __put_page(p)		atomic_dec(&(p)->_count)
 
 extern void FASTCALL(__page_cache_release(struct page *));
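get_page_unless_zero() is the speculative-reference side: take a reference
only if someone else still holds one. The patch can use atomic_inc_not_zero()
directly because the free state is now 0 rather than -1. Sketched as the
usual compare-and-swap loop in user-space C (a model of the generic
atomic_add_unless() pattern, not any architecture's actual code):

#include <stdatomic.h>
#include <stdbool.h>

/* Increment v unless it is zero; returns whether a reference was taken. */
static bool model_inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);
	while (old != 0) {
		/* On failure 'old' is refreshed and we retry (or bail out
		 * if the count has meanwhile dropped to zero). */
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
	}
	return false;	/* zero now means "free": do not resurrect the page */
}

int main(void)
{
	atomic_int live, dead;
	atomic_store(&live, 1);
	atomic_store(&dead, 0);
	return (model_inc_not_zero(&live) && !model_inc_not_zero(&dead)) ? 0 : 1;
}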
@@ -325,7 +316,7 @@ static inline int page_count(struct page *page)
 {
 	if (PageCompound(page))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
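Finally, page_count() no longer has to un-skew the stored value with "+ 1";
it is a direct read, with the existing compound-page indirection (tail pages
point at their head page via page_private()) left intact. A user-space sketch
with invented struct fields standing in for the real page flags and private
pointer:

#include <stdatomic.h>

struct model_page {
	int compound;			/* stands in for PageCompound() */
	struct model_page *head;	/* stands in for page_private() */
	atomic_int _count;
};

static int model_page_count(struct model_page *page)
{
	if (page->compound)
		page = page->head;		/* count lives on the head page */
	return atomic_load(&page->_count);	/* no "+ 1" un-skewing needed */
}

int main(void)
{
	struct model_page head = { 1, &head, 0 };
	struct model_page tail = { 1, &head, 0 };
	atomic_store(&head._count, 3);
	return model_page_count(&tail) == 3 ? 0 : 1;
}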