Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h  48
1 files changed, 25 insertions, 23 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb6..6aa016f1d3ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,43 +286,34 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */
 
 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
-#define __put_page(p)		atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_inc_not_zero(&page->_count);
+}
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
 static inline int page_count(struct page *page)
 {
-	if (PageCompound(page))
+	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
@@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
 
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+	atomic_set(&page->_count, 1);
+}
+
 void put_page(struct page *page);
 
+void split_page(struct page *page, unsigned int order);
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);
 
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 			void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
 void drop_pagecache(void);
 void drop_slab(void);
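
Note: the hunks above move include/linux/mm.h from the old biased refcount (a free page had ->_count == -1, and put_page_testzero() used atomic_add_negative()) to a natural zero-based count, with get_page_unless_zero() built on atomic_inc_not_zero() so a speculative lookup can never take a reference on a page whose count has already dropped to zero. The sketch below is a self-contained userspace model of that convention using C11 <stdatomic.h>; the struct and helper names mirror the diff, but the atomics and the main() driver are stand-ins for illustration, not kernel code.

/*
 * Illustrative userspace model of the refcount convention this diff
 * introduces: a free page has _count == 0, a speculative lookup takes a
 * reference only while the count is non-zero, and the last put reports
 * that the count hit zero. Built on C11 <stdatomic.h>, not the kernel's
 * atomic_t API.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct page { atomic_int _count; };

/* Mirrors init_page_count(): page enters the "allocator" with one ref. */
static void init_page_count(struct page *page)
{
	atomic_store(&page->_count, 1);
}

/* Mirrors put_page_testzero(): drop a ref, true when no users remain. */
static bool put_page_testzero(struct page *page)
{
	assert(atomic_load(&page->_count) != 0);	/* BUG_ON() stand-in */
	return atomic_fetch_sub(&page->_count, 1) == 1;
}

/*
 * Mirrors get_page_unless_zero() / atomic_inc_not_zero(): only take a ref
 * if the count is still non-zero, so a concurrent lookup cannot resurrect
 * a page whose last reference has already been dropped.
 */
static bool get_page_unless_zero(struct page *page)
{
	int old = atomic_load(&page->_count);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&page->_count, &old, old + 1))
			return true;	/* got a reference */
	}
	return false;	/* count was zero: page is (being) freed */
}

int main(void)
{
	struct page page;

	init_page_count(&page);			/* _count == 1 */
	assert(get_page_unless_zero(&page));	/* _count == 2 */
	assert(!put_page_testzero(&page));	/* _count == 1, still in use */
	assert(put_page_testzero(&page));	/* _count == 0, page is "free" */
	assert(!get_page_unless_zero(&page));	/* too late, lookup must fail */
	printf("refcount model behaves as expected\n");
	return 0;
}

Compiled with e.g. cc -std=c11 (file name is up to you), the assertions walk the sequence the new helpers are meant to support: init to one reference, a successful speculative get while the page is live, and a failed get once the last put has dropped the count to zero.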