Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 76bea88cbebc..81fb9bff0d4f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -65,7 +65,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock		(exclusive_swap_page, others)
  *        ->mapping->tree_lock
- *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock		(truncate->unmap_mapping_range)
@@ -528,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -552,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -586,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -607,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1277,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
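
For context on the change itself: fastcall only ever affected code generation
on 32-bit x86, where it told GCC to pass up to three arguments in registers
via regparm(3); every other architecture defined it to nothing, so removing
the annotation is behaviour-neutral there. A minimal sketch of the era's
definitions, assuming the usual header split (exact file names and guards
varied between kernel versions, so treat the paths as illustrative):

/* include/asm-i386/linkage.h (illustrative): i386 register convention */
#define fastcall	__attribute__((regparm(3)))
#define FASTCALL(x)	x __attribute__((regparm(3)))

/* include/linux/linkage.h: fallback when the arch defines nothing */
#ifndef FASTCALL
#define FASTCALL(x)	x	/* expands to a plain declaration */
#define fastcall		/* no-op on non-i386 architectures */
#endif

Once i386 builds compiled the whole kernel with regparm(3) by default, the
per-function annotation became redundant even on i386, which is why it could
be stripped tree-wide without changing any calling convention.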