Diffstat (limited to 'include')
-rw-r--r--	include/linux/page-flags.h	 2
-rw-r--r--	include/linux/pagemap.h	67
2 files changed, 42 insertions, 27 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 25aaccdb2f2..c74d3e87531 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \
 
 struct page;	/* forward declaration */
 
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
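
The change above drops the PAGEFLAG() setter/clearer generation for PG_locked, leaving only the test accessor. A minimal sketch of what TESTPAGEFLAG(Locked, locked) generates, following the Page##uname pattern visible in the hunk header (paraphrased, not the literal preprocessor output):

static inline int PageLocked(struct page *page)
{
	return test_bit(PG_locked, &page->flags);
}

Setting and clearing PG_locked now go through the set_page_locked(), clear_page_locked() and trylock_page() helpers added to pagemap.h below.
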
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 69ed3cb1197..5da31c12101 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
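
As a usage sketch, a caller inserting a freshly allocated page can rely on add_to_page_cache() returning the page locked; the function name and error handling here are illustrative only, not part of this patch:

/*
 * Hypothetical caller: allocate a page and insert it at @index.
 * On success the page comes back with PG_locked set (taken via
 * set_page_locked() inside add_to_page_cache()); the caller must
 * unlock_page() once the page contents are initialised.
 */
static struct page *example_add_new_page(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);	/* insertion failed */
		return NULL;
	}
	return page;
}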