author    Steve French <sfrench@us.ibm.com>  2006-03-30 22:35:56 -0500
committer Steve French <sfrench@us.ibm.com>  2006-03-30 22:35:56 -0500
commit    d62e54abca1146981fc9f98f85ff398a113a22c2
tree      870420dbc4c65e716dcef8a802aafdc0ef97a8b4  /include/linux/mm.h
parent    fd4a0b92db6a57cba8d03efbe1cebf91f9124ce0
parent    ce362c009250340358a7221f3cdb7954cbf19c01

Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'include/linux/mm.h')
 include/linux/mm.h | 48 +++++++++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb6..6aa016f1d3ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,43 +286,34 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1.  This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail!  Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */

 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
+}

 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
-#define __put_page(p)		atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_inc_not_zero(&page->_count);
+}

 extern void FASTCALL(__page_cache_release(struct page *));

 static inline int page_count(struct page *page)
 {
-	if (PageCompound(page))
+	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }

 static inline void get_page(struct page *page)
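This first hunk drops the longstanding bias under which a free page carried ->_count == -1, and converts the refcount macros into typed inline functions: put_page_testzero() becomes a plain atomic_dec_and_test(), and get_page_testone() is replaced by get_page_unless_zero(), which refuses to take a reference once the count has reached zero. Below is a minimal userspace sketch of the new rules, not kernel code: it assumes C11 atomics in place of the kernel's atomic_t, an assert() standing in for BUG_ON(), a bare stand-in struct page, and a compare-exchange loop modeling atomic_inc_not_zero(), which has no direct C11 equivalent.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct page {
	atomic_int _count;	/* stand-in for the kernel's refcount */
};

/* Model of the new put_page_testzero(): with the -1 bias gone, dropping
 * the last reference is a plain decrement-and-test against zero. */
static inline bool put_page_testzero(struct page *page)
{
	assert(atomic_load(&page->_count) != 0);	/* models the BUG_ON */
	return atomic_fetch_sub(&page->_count, 1) == 1;
}

/* Model of get_page_unless_zero(): take a reference only if the page is
 * not already free; the CAS loop stands in for atomic_inc_not_zero(). */
static inline bool get_page_unless_zero(struct page *page)
{
	int old = atomic_load(&page->_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&page->_count, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* the page was already free */
}

int main(void)
{
	struct page page;

	atomic_init(&page._count, 1);		/* one existing reference */
	assert(get_page_unless_zero(&page));	/* 1 -> 2 */
	assert(!put_page_testzero(&page));	/* 2 -> 1, not yet free */
	assert(put_page_testzero(&page));	/* 1 -> 0, page is now free */
	assert(!get_page_unless_zero(&page));	/* refused: count is 0 */
	return 0;
}

The final assert is the point of the new helper: once another CPU has dropped the count to zero, a speculative lookup can no longer resurrect the page.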
@@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }

+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+	atomic_set(&page->_count, 1);
+}
+
 void put_page(struct page *page);

+void split_page(struct page *page, unsigned int order);
+
 /*
  * Multiple processes may "see" the same page.  E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
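This second hunk adds init_page_count(), used when a page first enters the allocator at boot or during memory hotplug, and declares split_page(). With the bias gone, the old set_page_count(p, v) and its (v) - 1 adjustment disappear: a freshly initialized page simply holds one reference, and page_count() becomes a direct read. A companion sketch under the same assumptions as above (C11 atomics, stand-in struct page; the compound-page indirection of the real page_count() is omitted):

#include <stdatomic.h>
#include <stdio.h>

struct page {
	atomic_int _count;	/* stand-in for the kernel's refcount */
};

/* Model of init_page_count(): the first owner gets a count of exactly 1,
 * with no hidden -1 adjustment. */
static inline void init_page_count(struct page *page)
{
	atomic_store(&page->_count, 1);
}

/* Model of the simplified page_count(): now a direct read. */
static inline int page_count(struct page *page)
{
	return atomic_load(&page->_count);
}

int main(void)
{
	struct page page;

	init_page_count(&page);
	printf("after init_page_count: %d\n", page_count(&page));	/* 1 */

	atomic_fetch_sub(&page._count, 1);	/* drop the only reference */
	printf("once freed:            %d\n", page_count(&page));	/* 0 */
	return 0;
}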
@@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);

 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 			void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
 void drop_pagecache(void);
 void drop_slab(void);
