author     Nick Piggin <npiggin@suse.de>                   2008-08-02 06:01:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-08-05 00:31:34 -0400
commit     529ae9aaa08378cfe2a4350bded76f32cc8ff0ce (patch)
tree       d3ae998f9876c72a83a022805103a92111852b21 /include/linux/pagemap.h
parent     e9ba9698187ddbc0c5bfcf41de0349a662d23d02 (diff)
mm: rename page trylock
Converting page lock to new locking bitops requires a change of page flag
operation naming, so we might as well convert it to something nicer
(!TestSetPageLocked_Lock => trylock_page, SetPageLocked => set_page_locked).

This also facilitates lockdeping of page lock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--  include/linux/pagemap.h  67
1 file changed, 41 insertions(+), 26 deletions(-)
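Note that the rename inverts the sense of the lock-attempt test at every call site: TestSetPageLocked() returned the previous bit value, so a nonzero return meant the lock attempt failed, while trylock_page() returns nonzero on success, matching the kernel's other trylock primitives (spin_trylock(), mutex_trylock()). A minimal before/after sketch of a caller, for illustration only (the helper do_work() is invented, not part of this patch):

	/* Before this patch: TestSetPageLocked() returns the previous bit,
	 * so a zero return means we just took the lock. */
	if (!TestSetPageLocked(page)) {
		do_work(page);		/* page is locked here */
		unlock_page(page);
	}

	/* After this patch: trylock_page() returns nonzero on success,
	 * consistent with spin_trylock() and mutex_trylock(). */
	if (trylock_page(page)) {
		do_work(page);		/* page is locked here */
		unlock_page(page);
	}

The inversion is visible in the lock_page() hunk below: "if (TestSetPageLocked(page))" becomes "if (!trylock_page(page))".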
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 69ed3cb1197a..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
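As a usage note: the new trylock_page() is the building block for the sleeping lock_page() above (fast atomic attempt, slow-path __lock_page() only on contention), but it is also usable on its own wherever a caller would rather skip a contended page than sleep. A minimal sketch of that pattern, assuming the caller already holds a reference on the page (the function my_scan_page() is hypothetical, not part of this patch):

	/* Hypothetical caller, for illustration only. */
	static int my_scan_page(struct page *page)
	{
		if (!trylock_page(page))
			return -EBUSY;	/* contended: caller may retry later */

		/* ... examine or modify the page under PG_locked ... */

		unlock_page(page);
		return 0;
	}

By contrast, set_page_locked() is a plain non-atomic-looking set_bit() with no fallback: as the comment in the final hunk says, it is only safe on a newly allocated page that no one else can yet observe, which is exactly how add_to_page_cache() uses it.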