Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r-- | include/linux/pagemap.h | 37
1 file changed, 27 insertions, 10 deletions
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e12cdc6d79ee..716875e53520 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -48,7 +48,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
 
 static inline int mapping_unevictable(struct address_space *mapping)
 {
-	if (likely(mapping))
+	if (mapping)
 		return test_bit(AS_UNEVICTABLE, &mapping->flags);
 	return !!mapping;
 }
@@ -219,6 +219,12 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
 
+static inline struct page *page_cache_alloc_readahead(struct address_space *x)
+{
+	return __page_cache_alloc(mapping_gfp_mask(x) |
+				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
+}
+
 typedef int filler_t(void *, struct page *);
 
 extern struct page * find_get_page(struct address_space *mapping,
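The new page_cache_alloc_readahead() helper tags readahead allocations with __GFP_NORETRY and __GFP_NOWARN, so a failed speculative allocation fails quickly instead of retrying hard and does not warn. Below is a minimal sketch of how a readahead path might use it; the function name, the index handling and the GFP_KERNEL passed to add_to_page_cache_lru() are assumptions for illustration, not code from this patch.

/* Illustrative kernel-context sketch (would live in a .c file that
 * includes <linux/pagemap.h>); only page_cache_alloc_readahead() and
 * add_to_page_cache_lru() come from this header. */
static int readahead_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	int err;

	/* Best-effort allocation: may fail quickly rather than retry. */
	page = page_cache_alloc_readahead(mapping);
	if (!page)
		return -ENOMEM;

	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (err)
		page_cache_release(page);	/* e.g. the page is already cached */
	return err;
}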
@@ -298,7 +304,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 
 extern void __lock_page(struct page *page);
 extern int __lock_page_killable(struct page *page);
-extern void __lock_page_nosync(struct page *page);
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+				unsigned int flags);
 extern void unlock_page(struct page *page);
 
 static inline void __set_page_locked(struct page *page)
@@ -340,22 +347,31 @@ static inline int lock_page_killable(struct page *page)
 }
 
 /*
- * lock_page_nosync should only be used if we can't pin the page's inode.
- * Doesn't play quite so well with block device plugging.
+ * lock_page_or_retry - Lock the page, unless this would block and the
+ * caller indicated that it can handle a retry.
  */
-static inline void lock_page_nosync(struct page *page)
+static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+				     unsigned int flags)
 {
 	might_sleep();
-	if (!trylock_page(page))
-		__lock_page_nosync(page);
+	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
 }
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
  */
 extern void wait_on_page_bit(struct page *page, int bit_nr);
 
+extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+	if (PageLocked(page))
+		return wait_on_page_bit_killable(page, PG_locked);
+	return 0;
+}
+
 /*
  * Wait for a page to be unlocked.
  *
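lock_page_or_retry() gives fault-handling code a way to avoid sleeping on a page lock while holding mmap_sem: if the trylock fails, __lock_page_or_retry() can (when the fault flags allow it, presumably after releasing the passed mm's mmap_sem) return 0 so the caller reports a retry instead of blocking. A rough sketch of that calling convention in a ->fault-style handler follows; the handler itself, the SIGBUS fallback and the surrounding mm/fs types are assumed context, not code from this patch.

/* Illustrative only: a simplified ->fault-style handler.
 * Only find_get_page() and lock_page_or_retry() come from this header. */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct page *page;

	page = find_get_page(mapping, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;	/* a real handler would allocate/read here */

	/*
	 * Returns 1 with the page locked; returns 0 only when the caller's
	 * fault flags said a retry is acceptable, in which case the lock was
	 * not taken and the fault should be redone.
	 */
	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
		page_cache_release(page);
		return VM_FAULT_RETRY;
	}

	vmf->page = page;	/* hand the page back locked */
	return VM_FAULT_LOCKED;
}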
@@ -442,8 +458,9 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
+extern void delete_from_page_cache(struct page *page);
+extern void __delete_from_page_cache(struct page *page);
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
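The new replace_page_cache_page() lets a filesystem substitute a new page for one already in the page cache at the same offset. A minimal caller sketch is below; the wrapper name and the comments about locking and references reflect assumptions about how the helper is meant to be used, not something stated in this header.

/* Illustrative only: swap a freshly prepared page in for a cached one.
 * Only replace_page_cache_page() comes from this header; the caller's
 * setup (references and locks on both pages) is assumed. */
static int swap_in_new_copy(struct page *oldpage, struct page *newpage)
{
	int err;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err)
		return err;	/* presumably -ENOMEM; the cache is unchanged */

	/* On success newpage has taken over oldpage's slot in the mapping. */
	return 0;
}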