author     Nick Piggin <npiggin@suse.de>                    2008-10-18 23:26:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-10-20 11:52:32 -0400
commit     8413ac9d8c9a1366a4f57880723126cd24e5a5c3 (patch)
tree       fcee6ff670dcfccf895a48e92d27f52902d34301
parent     a978d6f521063514812a7094dbe5036e056e4de3 (diff)
mm: page lock use lock bitops
trylock_page, unlock_page open and close a critical section. Hence,
we can use the lock bitops to get the desired memory ordering.

Also, mark trylock as likely to succeed (and remove the annotation
from callers).

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/pagemap.h    2
-rw-r--r--  mm/filemap.c              13
-rw-r--r--  mm/swapfile.c              2
3 files changed, 7 insertions, 10 deletions
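
Editor's note: the ordering the commit relies on is that test_and_set_bit_lock()
acts as an acquire operation and clear_bit_unlock() as a release operation, so
nothing inside the page lock can leak out of it. Below is a rough userspace C11
analogue of that pairing; it is a sketch only, not kernel code, and the
example_* names, flag word and bit index are invented for illustration.

/* Userspace C11 sketch of the lock-bitop pairing (illustrative only;
 * the example_* names and flag layout are not from the kernel). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_PG_LOCKED 0UL

static _Atomic unsigned long example_flags;

/* Analogue of trylock_page(): acquire semantics, like test_and_set_bit_lock(). */
static bool example_trylock(void)
{
        unsigned long old = atomic_fetch_or_explicit(&example_flags,
                                                     1UL << EXAMPLE_PG_LOCKED,
                                                     memory_order_acquire);
        return !(old & (1UL << EXAMPLE_PG_LOCKED));
}

/* Analogue of the unlock path's clear: release semantics, like clear_bit_unlock(). */
static void example_unlock(void)
{
        atomic_fetch_and_explicit(&example_flags, ~(1UL << EXAMPLE_PG_LOCKED),
                                  memory_order_release);
}

int main(void)
{
        if (example_trylock()) {
                /* critical section: ordered after the acquire, before the release */
                example_unlock();
        }
        printf("flags after unlock: %lu\n", (unsigned long)example_flags);
        return 0;
}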
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7334b2b6c4c6..709742be02f0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -311,7 +311,7 @@ static inline void __clear_page_locked(struct page *page)
 
 static inline int trylock_page(struct page *page)
 {
-        return !test_and_set_bit(PG_locked, &page->flags);
+        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
 }
 
 /*
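
For context on the likely() now folded into trylock_page(): it is the kernel's
usual branch-prediction hint, built on GCC's __builtin_expect. A simplified
sketch of the definitions is below; the real macros live in
include/linux/compiler.h and carry extra instrumentation in some configurations.

/* Simplified sketch of the kernel's branch hints (see include/linux/compiler.h). */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)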
diff --git a/mm/filemap.c b/mm/filemap.c
index a1ddd2557af2..e1b23fda48de 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -573,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * mechananism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * test_and_set_bit() to lock the page; the second mb is necessary to enforce
- * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
- * races with a parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
-        smp_mb__before_clear_bit();
-        if (!test_and_clear_bit(PG_locked, &page->flags))
-                BUG();
-        smp_mb__after_clear_bit();
+        VM_BUG_ON(!PageLocked(page));
+        clear_bit_unlock(PG_locked, &page->flags);
+        smp_mb__after_clear_bit();
         wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
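
The retained smp_mb__after_clear_bit() matters because the release semantics of
clear_bit_unlock() only order the critical section before the clear of
PG_locked; they do not order the clear before the later read of the wait queue
in wake_up_page(). Without a full barrier, the unlocker's check for sleepers
and a sleeper's re-check of PG_locked in wait_on_page_locked() can both see
stale values and the wakeup is lost. The C11 sketch below shows the shape of
the two sides; the example_* names are invented, and the real code uses wait
queues rather than a waiter counter.

/* Sketch of the unlock vs. wait_on_page_locked() ordering (illustrative only). */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool example_locked;
static atomic_int  example_waiters;

void example_unlock_and_wake(void)
{
        /* release: the critical section cannot leak past this clear */
        atomic_store_explicit(&example_locked, false, memory_order_release);
        /* full barrier, standing in for smp_mb__after_clear_bit():
         * the clear must be ordered before we look for sleepers */
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&example_waiters, memory_order_relaxed) > 0) {
                /* wake_up_page(page, PG_locked) would run here */
        }
}

bool example_should_sleep(void)
{
        /* register as a waiter first ... */
        atomic_fetch_add_explicit(&example_waiters, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        /* ... then re-check the flag; sleep only if it is still set */
        return atomic_load_explicit(&example_locked, memory_order_relaxed);
}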
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2a97fafa3d89..90cb67a5417c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -422,7 +422,7 @@ void free_swap_and_cache(swp_entry_t entry)
         if (p) {
                 if (swap_entry_free(p, swp_offset(entry)) == 1) {
                         page = find_get_page(&swapper_space, entry.val);
-                        if (page && unlikely(!trylock_page(page))) {
+                        if (page && !trylock_page(page)) {
                                 page_cache_release(page);
                                 page = NULL;
                         }