author	Nick Piggin <npiggin@suse.de>	2008-08-02 06:01:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-05 00:31:34 -0400
commit	529ae9aaa08378cfe2a4350bded76f32cc8ff0ce (patch)
tree	d3ae998f9876c72a83a022805103a92111852b21
parent	e9ba9698187ddbc0c5bfcf41de0349a662d23d02 (diff)
mm: rename page trylock
Converting page lock to new locking bitops requires a change of page flag
operation naming, so we might as well convert it to something nicer
(!TestSetPageLocked_Lock => trylock_page, SetPageLocked => set_page_locked).

This also facilitates lockdeping of page lock.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
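The conversion is mechanical but inverts the return-value polarity at every call site: the old TestSetPageLocked() returned nonzero when the page was already locked (failure), while the new trylock_page() returns nonzero when the lock was acquired (success). A minimal sketch of the two idioms side by side (illustrative only, not part of the patch):

	/* Before: nonzero means someone else already holds PG_locked */
	if (TestSetPageLocked(page))
		goto busy;

	/* After: nonzero means we now hold PG_locked */
	if (!trylock_page(page))
		goto busy;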
 drivers/scsi/sg.c           |  2
 fs/afs/write.c              |  2
 fs/cifs/file.c              |  2
 fs/jbd/commit.c             |  4
 fs/jbd2/commit.c            |  2
 fs/reiserfs/journal.c       |  2
 fs/splice.c                 |  2
 fs/xfs/linux-2.6/xfs_aops.c |  4
 include/linux/page-flags.h  |  2
 include/linux/pagemap.h     | 67
 mm/filemap.c                | 12
 mm/memory.c                 |  2
 mm/migrate.c                |  4
 mm/rmap.c                   |  2
 mm/shmem.c                  |  4
 mm/swap.c                   |  2
 mm/swap_state.c             |  8
 mm/swapfile.c               |  2
 mm/truncate.c               |  4
 mm/vmscan.c                 |  4
 20 files changed, 74 insertions(+), 59 deletions(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index d3b8ebb83776..3d36270a8b4d 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1747,7 +1747,7 @@ st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
 	 */
 	flush_dcache_page(pages[i]);
 	/* ?? Is locking needed? I don't think so */
-	/* if (TestSetPageLocked(pages[i]))
+	/* if (!trylock_page(pages[i]))
 	   goto out_unlock; */
 	}
 
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 9a849ad3c489..065b4e10681a 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -404,7 +404,7 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
 		page = pages[loop];
 		if (page->index > wb->last)
 			break;
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			break;
 		if (!PageDirty(page) ||
 		    page_private(page) != (unsigned long) wb) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0aac824371a5..e692c42f24b5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1280,7 +1280,7 @@ retry:
 
 		if (first < 0)
 			lock_page(page);
-		else if (TestSetPageLocked(page))
+		else if (!trylock_page(page))
 			break;
 
 		if (unlikely(page->mapping != mapping)) {
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 2eccbfaa1d48..81a9ad7177ca 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -63,7 +63,7 @@ static void release_buffer_page(struct buffer_head *bh)
 		goto nope;
 
 	/* OK, it's a truncated page */
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		goto nope;
 
 	page_cache_get(page);
@@ -446,7 +446,7 @@ void journal_commit_transaction(journal_t *journal)
 				spin_lock(&journal->j_list_lock);
 			}
 			if (unlikely(!buffer_uptodate(bh))) {
-				if (TestSetPageLocked(bh->b_page)) {
+				if (!trylock_page(bh->b_page)) {
 					spin_unlock(&journal->j_list_lock);
 					lock_page(bh->b_page);
 					spin_lock(&journal->j_list_lock);
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index adf0395f318e..f2ad061e95ec 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -67,7 +67,7 @@ static void release_buffer_page(struct buffer_head *bh)
 		goto nope;
 
 	/* OK, it's a truncated page */
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		goto nope;
 
 	page_cache_get(page);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index c8f60ee183b5..ce2208b27118 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -627,7 +627,7 @@ static int journal_list_still_alive(struct super_block *s,
 static void release_buffer_page(struct buffer_head *bh)
 {
 	struct page *page = bh->b_page;
-	if (!page->mapping && !TestSetPageLocked(page)) {
+	if (!page->mapping && trylock_page(page)) {
 		page_cache_get(page);
 		put_bh(bh);
 		if (!page->mapping)
diff --git a/fs/splice.c b/fs/splice.c
index b30311ba8af6..1bbc6f4bb09c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -371,7 +371,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 			 * for an in-flight io page
 			 */
 			if (flags & SPLICE_F_NONBLOCK) {
-				if (TestSetPageLocked(page)) {
+				if (!trylock_page(page)) {
 					error = -EAGAIN;
 					break;
 				}
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 0b211cba1909..fa73179233ad 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -675,7 +675,7 @@ xfs_probe_cluster(
 		} else
 			pg_offset = PAGE_CACHE_SIZE;
 
-		if (page->index == tindex && !TestSetPageLocked(page)) {
+		if (page->index == tindex && trylock_page(page)) {
 			pg_len = xfs_probe_page(page, pg_offset, mapped);
 			unlock_page(page);
 		}
@@ -759,7 +759,7 @@ xfs_convert_page(
 
 	if (page->index != tindex)
 		goto fail;
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		goto fail;
 	if (PageWriteback(page))
 		goto fail_unlock_page;
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 25aaccdb2f26..c74d3e875314 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -163,7 +163,7 @@ static inline int Page##uname(struct page *page) \
 
 struct page;	/* forward declaration */
 
-PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+TESTPAGEFLAG(Locked, locked)
 PAGEFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
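This page-flags.h hunk is the enforcement half of the rename: dropping the PAGEFLAG/TESTSCFLAG generators for PG_locked removes SetPageLocked(), ClearPageLocked() and TestSetPageLocked() entirely, so any straggler still using the old names fails to compile. Only the plain test accessor survives; roughly, what the remaining TESTPAGEFLAG line expands to (a sketch based on the macro definitions earlier in this header):

	static inline int PageLocked(struct page *page)
	{
		return test_bit(PG_locked, &page->flags);
	}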
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 69ed3cb1197a..5da31c12101c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -250,29 +250,6 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 	return read_cache_page(mapping, index, filler, data);
 }
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				pgoff_t index, gfp_t gfp_mask);
-extern void remove_from_page_cache(struct page *page);
-extern void __remove_from_page_cache(struct page *page);
-
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		ClearPageLocked(page);
-	return error;
-}
-
 /*
  * Return byte-offset into filesystem object for page.
  */
@@ -294,13 +271,28 @@ extern int __lock_page_killable(struct page *page);
 extern void __lock_page_nosync(struct page *page);
 extern void unlock_page(struct page *page);
 
+static inline void set_page_locked(struct page *page)
+{
+	set_bit(PG_locked, &page->flags);
+}
+
+static inline void clear_page_locked(struct page *page)
+{
+	clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+	return !test_and_set_bit(PG_locked, &page->flags);
+}
+
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
 static inline void lock_page(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page(page);
 }
 
@@ -312,7 +304,7 @@ static inline void lock_page(struct page *page)
 static inline int lock_page_killable(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		return __lock_page_killable(page);
 	return 0;
 }
@@ -324,7 +316,7 @@ static inline int lock_page_killable(struct page *page)
 static inline void lock_page_nosync(struct page *page)
 {
 	might_sleep();
-	if (TestSetPageLocked(page))
+	if (!trylock_page(page))
 		__lock_page_nosync(page);
 }
 
@@ -409,4 +401,27 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
 	return ret;
 }
 
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+	int error;
+
+	set_page_locked(page);
+	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+	if (unlikely(error))
+		clear_page_locked(page);
+	return error;
+}
+
 #endif /* _LINUX_PAGEMAP_H */
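Note the asymmetry the new helpers preserve: set_page_locked() and clear_page_locked() are raw bitops with no memory barriers and no waitqueue wakeup, so they are only safe while the page is invisible to other threads; once the page is published, unlock_page() (which pairs the barriers and wakes PG_locked waiters, see mm/filemap.c below) is the only correct release. A minimal sketch of the intended pattern, mirroring add_to_page_cache() above (illustrative, not from the patch):

	struct page *page = page_cache_alloc(mapping);
	if (!page)
		return NULL;

	set_page_locked(page);		/* page is private: plain set_bit is enough */
	if (add_to_page_cache_locked(page, mapping, index, GFP_KERNEL)) {
		clear_page_locked(page);	/* insertion failed: still private */
		page_cache_release(page);
		return NULL;
	}
	/* page is now visible in the page cache: from here on use
	 * unlock_page(), never clear_page_locked(), so waiters are woken */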
diff --git a/mm/filemap.c b/mm/filemap.c
index d97d1ad55473..54e968650855 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
-	if (!TestClearPageLocked(page))
+	if (!test_and_clear_bit(PG_locked, &page->flags))
 		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
@@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 	struct page *page = find_get_page(mapping, index);
 
 	if (page) {
-		if (!TestSetPageLocked(page))
+		if (trylock_page(page))
 			return page;
 		page_cache_release(page);
 		return NULL;
@@ -1027,7 +1027,7 @@ find_page:
 		if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
 					!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto page_not_up_to_date;
 		if (!mapping->a_ops->is_partially_uptodate(page,
 							desc, offset))
diff --git a/mm/memory.c b/mm/memory.c
index a472bcd4b061..1002f473f497 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1789,7 +1789,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * not dirty accountable.
 	 */
 	if (PageAnon(old_page)) {
-		if (!TestSetPageLocked(old_page)) {
+		if (trylock_page(old_page)) {
 			reuse = can_share_swap_page(old_page);
 			unlock_page(old_page);
 		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 153572fb60b8..2a80136b23bb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -605,7 +605,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	 * establishing additional references. We are the only one
 	 * holding a reference to the new page at this point.
 	 */
-	if (TestSetPageLocked(newpage))
+	if (!trylock_page(newpage))
 		BUG();
 
 	/* Prepare mapping for the new page.*/
@@ -667,7 +667,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	rc = -EAGAIN;
-	if (TestSetPageLocked(page)) {
+	if (!trylock_page(page)) {
 		if (!force)
 			goto move_newpage;
 		lock_page(page);
diff --git a/mm/rmap.c b/mm/rmap.c
index 94a5246a3f98..1ea4e6fcee77 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -422,7 +422,7 @@ int page_referenced(struct page *page, int is_locked,
 		referenced += page_referenced_anon(page, mem_cont);
 	else if (is_locked)
 		referenced += page_referenced_file(page, mem_cont);
-	else if (TestSetPageLocked(page))
+	else if (!trylock_page(page))
 		referenced++;
 	else {
 		if (page->mapping)
diff --git a/mm/shmem.c b/mm/shmem.c
index c1e5a3b4f758..04fb4f1ab88e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1265,7 +1265,7 @@ repeat:
 		}
 
 		/* We have to do this with page locked to prevent races */
-		if (TestSetPageLocked(swappage)) {
+		if (!trylock_page(swappage)) {
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			wait_on_page_locked(swappage);
@@ -1329,7 +1329,7 @@ repeat:
 		shmem_swp_unmap(entry);
 		filepage = find_get_page(mapping, idx);
 		if (filepage &&
-		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
+		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
 			spin_unlock(&info->lock);
 			wait_on_page_locked(filepage);
 			page_cache_release(filepage);
diff --git a/mm/swap.c b/mm/swap.c
index 7417a2adbe50..9e0cb3118079 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -444,7 +444,7 @@ void pagevec_strip(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && !TestSetPageLocked(page)) {
+		if (PagePrivate(page) && trylock_page(page)) {
 			if (PagePrivate(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b8035b055129..167cf2dc8a03 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -201,7 +201,7 @@ void delete_from_swap_cache(struct page *page)
  */
 static inline void free_swap_cache(struct page *page)
 {
-	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
+	if (PageSwapCache(page) && trylock_page(page)) {
 		remove_exclusive_swap_page(page);
 		unlock_page(page);
 	}
@@ -302,9 +302,9 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * re-using the just freed swap entry for an existing page.
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
-		SetPageLocked(new_page);
+		set_page_locked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
-		if (!err) {
+		if (likely(!err)) {
 			/*
 			 * Initiate read into locked page and return.
 			 */
@@ -312,7 +312,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
-		ClearPageLocked(new_page);
+		clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
 
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bb7f79641f9e..1e330f2998fa 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -403,7 +403,7 @@ void free_swap_and_cache(swp_entry_t entry)
 	if (p) {
 		if (swap_entry_free(p, swp_offset(entry)) == 1) {
 			page = find_get_page(&swapper_space, entry.val);
-			if (page && unlikely(TestSetPageLocked(page))) {
+			if (page && unlikely(!trylock_page(page))) {
 				page_cache_release(page);
 				page = NULL;
 			}
diff --git a/mm/truncate.c b/mm/truncate.c
index 894e9a70699f..250505091d37 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -187,7 +187,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			if (page_index > next)
 				next = page_index;
 			next++;
-			if (TestSetPageLocked(page))
+			if (!trylock_page(page))
 				continue;
 			if (PageWriteback(page)) {
 				unlock_page(page);
@@ -280,7 +280,7 @@ unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 		pgoff_t index;
 		int lock_failed;
 
-		lock_failed = TestSetPageLocked(page);
+		lock_failed = !trylock_page(page);
 
 		/*
 		 * We really shouldn't be looking at the ->index of an
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 75be453628bf..1ff1a58e7c10 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -496,7 +496,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		page = lru_to_page(page_list);
 		list_del(&page->lru);
 
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
@@ -582,7 +582,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * A synchronous write - probably a ramdisk. Go
 				 * ahead and try to reclaim the page.
 				 */
-				if (TestSetPageLocked(page))
+				if (!trylock_page(page))
 					goto keep;
 				if (PageDirty(page) || PageWriteback(page))
 					goto keep_locked;