Diffstat (limited to 'mm/filemap.c')
 -rw-r--r--  mm/filemap.c  26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 5de7633e1dbe..54e968650855 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
-	if (!TestClearPageLocked(page))
+	if (!test_and_clear_bit(PG_locked, &page->flags))
 		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
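
The open-coded test_and_clear_bit() pairs with a test_and_set_bit() on the lock side, and the two barriers implement a classic store-buffering guard: the unlocker clears the bit and then reads the waitqueue, while a waiter enqueues itself and then re-reads the bit, so each side needs a full barrier between its store and its load or a wakeup can be lost. A user-space sketch of that protocol in C11 atomics follows; `locked` and `waiters` are hypothetical stand-ins for PG_locked and the page waitqueue, not kernel API.

	/* Build: cc -std=c11 -pthread sketch.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int locked = 1;	/* "page" starts locked */
	static atomic_int waiters;	/* stands in for the page waitqueue */

	/* Waiter (wait_on_page_locked() analogue): register, full fence,
	 * then re-check the bit before going to sleep. */
	static void *waiter(void *arg)
	{
		atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		while (atomic_load_explicit(&locked, memory_order_relaxed))
			;			/* a real waiter would sleep */
		return arg;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);

		/* Unlocker (unlock_page() analogue): clear the bit, full
		 * fence, then read the waitqueue.  The fence plays the role
		 * of smp_mb__after_clear_bit(); without it the unlocker may
		 * see no waiter while the waiter still sees the bit set,
		 * and the wakeup is lost. */
		atomic_store_explicit(&locked, 0, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&waiters, memory_order_relaxed))
			printf("wake_up_page() equivalent\n");

		pthread_join(t, NULL);
		return 0;
	}
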
@@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 	struct page *page = find_get_page(mapping, index);
 
 	if (page) {
-		if (!TestSetPageLocked(page))
+		if (trylock_page(page))
 			return page;
 		page_cache_release(page);
 		return NULL;
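
The dropped negation is deliberate: TestSetPageLocked() returned the old bit value (nonzero meaning the page was already locked), while trylock_page() returns true on success. A plausible sketch of the renamed helper, built from the same bitop seen in unlock_page() above; the in-tree definition in include/linux/pagemap.h may differ in detail:

	static inline int trylock_page(struct page *page)
	{
		/* test_and_set_bit() returns the old bit: 0 means the
		 * lock was free and is now ours, so success is the
		 * negation. */
		return !test_and_set_bit(PG_locked, &page->flags);
	}
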
@@ -1023,8 +1023,17 @@ find_page:
 					ra, filp, page,
 					index, last_index - index);
 		}
-		if (!PageUptodate(page))
-			goto page_not_up_to_date;
+		if (!PageUptodate(page)) {
+			if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+					!mapping->a_ops->is_partially_uptodate)
+				goto page_not_up_to_date;
+			if (!trylock_page(page))
+				goto page_not_up_to_date;
+			if (!mapping->a_ops->is_partially_uptodate(page,
+								desc, offset))
+				goto page_not_up_to_date_locked;
+			unlock_page(page);
+		}
 page_ok:
 		/*
 		 * i_size must be checked after we know the page is Uptodate.
@@ -1094,6 +1103,7 @@ page_not_up_to_date:
 	if (lock_page_killable(page))
 		goto readpage_eio;
 
+page_not_up_to_date_locked:
 	/* Did it get truncated before we got the lock? */
 	if (!page->mapping) {
 		unlock_page(page);
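
Together with the previous hunk, this gives the read path a fast exit for blocksize < pagesize filesystems: a page whose PG_uptodate flag is clear may still have every block covering the requested byte range uptodate, and the new a_ops hook lets the filesystem say so without forcing a full readpage. A condensed, illustrative sketch of such a hook, modeled on a buffer_head walk; this is not the verbatim in-tree helper:

	/* Succeed only if every block overlapping the requested range
	 * [from, from + desc->count) on this page is uptodate. */
	static int is_partially_uptodate_sketch(struct page *page,
						read_descriptor_t *desc,
						unsigned long from)
	{
		struct inode *inode = page->mapping->host;
		unsigned blocksize = 1 << inode->i_blkbits;
		unsigned to = from + min_t(unsigned, desc->count,
					   PAGE_CACHE_SIZE - from);
		struct buffer_head *bh, *head;
		unsigned block_start = 0;

		if (!page_has_buffers(page))
			return 0;	/* no per-block state to consult */

		bh = head = page_buffers(page);
		do {
			unsigned block_end = block_start + blocksize;

			/* Only blocks overlapping the request matter. */
			if (block_end > from && block_start < to &&
			    !buffer_uptodate(bh))
				return 0;
			block_start = block_end;
			bh = bh->b_this_page;
		} while (bh != head && block_start < to);

		return 1;
	}
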
@@ -1869,7 +1879,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
 	 * The !iov->iov_len check ensures we skip over unlikely
 	 * zero-length segments (without overruning the iovec).
 	 */
-	while (bytes || unlikely(!iov->iov_len && i->count)) {
+	while (bytes || unlikely(i->count && !iov->iov_len)) {
 		int copy;
 
 		copy = min(bytes, iov->iov_len - base);
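
The operand swap in the while condition is a correctness fix, not style: once i->count reaches zero, iov can already point one element past the end of the iovec array, so evaluating !iov->iov_len first reads out of bounds. Putting i->count first lets && short-circuit before the stale pointer is touched. A self-contained user-space demonstration of the same advance loop; vec, count, and base are hypothetical locals mirroring the iov_iter fields:

	#include <stdio.h>
	#include <sys/uio.h>

	int main(void)
	{
		char a[4], b[4];
		struct iovec vec[2] = {
			{ .iov_base = a, .iov_len = sizeof(a) },
			{ .iov_base = b, .iov_len = sizeof(b) },
		};
		struct iovec *iov = vec;
		size_t count = sizeof(a) + sizeof(b);	/* bytes remaining */
		size_t bytes = count;			/* advance everything */
		size_t base = 0;

		/* After consuming both segments, iov == vec + 2: one past
		 * the end.  `count && !iov->iov_len` never touches it
		 * because count is 0; the old order, `!iov->iov_len &&
		 * count`, would read iov[2].iov_len out of bounds. */
		while (bytes || (count && !iov->iov_len)) {
			size_t copy = bytes < iov->iov_len - base
					? bytes : iov->iov_len - base;

			bytes -= copy;
			count -= copy;
			base += copy;
			if (base == iov->iov_len) {
				iov++;
				base = 0;
			}
		}
		printf("advanced cleanly; iov is %td past vec\n", iov - vec);
		return 0;
	}
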