Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 42bbc6909ba4..876bc595d0f8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -558,14 +558,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
  * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * test_and_set_bit() to lock the page; the second mb is necessary to enforce
+ * ordering between the clear_bit and the read of the waitqueue (to avoid SMP
+ * races with a parallel wait_on_page_locked()).
  */
 void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
-	if (!TestClearPageLocked(page))
+	if (!test_and_clear_bit(PG_locked, &page->flags))
 		BUG();
 	smp_mb__after_clear_bit();
 	wake_up_page(page, PG_locked);
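
The hunk above drops the PageLocked test-and-set wrappers in favor of raw bitops, and the reworded comment explains the two barriers around the clear. As a rough userspace model of that barrier pairing, consider the following C11-atomics sketch; it is illustrative only, and my_lock/my_unlock and the two flags are hypothetical stand-ins, not kernel code:

	/* Userspace model of the unlock_page() barrier pairing; a sketch
	 * under assumed semantics, not the kernel implementation. */
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool locked;          /* models PG_locked */
	static atomic_bool waiter_queued;   /* models the page waitqueue */

	static void my_lock(void)
	{
		/* The acquire exchange opens the critical section, like
		 * test_and_set_bit() on PG_locked. */
		while (atomic_exchange_explicit(&locked, true,
						memory_order_acquire))
			;	/* spin here; the kernel sleeps instead */
	}

	static void my_unlock(void)
	{
		/* smp_mb__before_clear_bit(): make the critical section's
		 * stores visible before the lock bit is observed clear. */
		atomic_store_explicit(&locked, false, memory_order_release);

		/* smp_mb__after_clear_bit(): order the clear against the
		 * read of the waitqueue, so a waiter that queued itself in
		 * parallel with the unlock is not missed. */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load_explicit(&waiter_queued,
					 memory_order_relaxed)) {
			/* wake_up_page(page, PG_locked) would run here */
		}
	}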
@@ -931,7 +931,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 	struct page *page = find_get_page(mapping, index);
 
 	if (page) {
-		if (!TestSetPageLocked(page))
+		if (trylock_page(page))
 			return page;
 		page_cache_release(page);
 		return NULL;
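
This hunk and the next both replace the old TestSetPageLocked() idiom with trylock_page(), which returns nonzero when the lock was acquired - the inverse polarity of the raw bitop, which returns the bit's old value. At this point in the tree the helper is presumably a thin wrapper along these lines (a sketch of the assumed include/linux/pagemap.h definition, not verbatim source):

	/* Assumed shape of trylock_page(); the real definition lives in
	 * include/linux/pagemap.h. */
	static inline int trylock_page(struct page *page)
	{
		/* test_and_set_bit() returns the bit's old value, so 0
		 * means the page was unlocked and we now own PG_locked. */
		return !test_and_set_bit(PG_locked, &page->flags);
	}

That inversion is why the next call site flips its test: the old "if (TestSetPageLocked(page))" (bit already set, lock failed) becomes "if (!trylock_page(page))".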
@@ -1027,7 +1027,7 @@ find_page:
 		if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
 				!mapping->a_ops->is_partially_uptodate)
 			goto page_not_up_to_date;
-		if (TestSetPageLocked(page))
+		if (!trylock_page(page))
 			goto page_not_up_to_date;
 		if (!mapping->a_ops->is_partially_uptodate(page,
 						desc, offset))
@@ -1879,7 +1879,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
 	 * The !iov->iov_len check ensures we skip over unlikely
 	 * zero-length segments (without overruning the iovec).
 	 */
-	while (bytes || unlikely(!iov->iov_len && i->count)) {
+	while (bytes || unlikely(i->count && !iov->iov_len)) {
 		int copy;
 
 		copy = min(bytes, iov->iov_len - base);
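
The operand swap in the loop condition matters because && short-circuits: testing i->count first guarantees iov->iov_len is never read once the iterator is exhausted, at which point iov may already point one element past the end of the iovec array. A minimal standalone illustration of the hazard, with hypothetical values rather than kernel code:

	#include <stddef.h>
	#include <sys/uio.h>

	static void overrun_demo(void)
	{
		/* Hypothetical state after both segments have been
		 * consumed: iov points one past the array and the
		 * remaining count has reached zero. */
		struct iovec vec[2] = {
			{ .iov_base = NULL, .iov_len = 8 },
			{ .iov_base = NULL, .iov_len = 8 },
		};
		const struct iovec *iov = &vec[2]; /* one past the end */
		size_t count = 0;		   /* nothing left */

		if (!iov->iov_len && count) {
			/* old order: the out-of-bounds iov->iov_len load
			 * happens before count is consulted */
		}
		if (count && !iov->iov_len) {
			/* new order: count == 0 short-circuits the &&,
			 * so iov->iov_len is never read */
		}
	}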
@@ -2129,13 +2129,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * After a write we want buffered reads to be sure to go to disk to get
 	 * the new data. We invalidate clean cached page from the region we're
 	 * about to write. We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_CACHE_SHIFT, end);
-		if (written)
+		/*
+		 * If a page can not be invalidated, return 0 to fall back
+		 * to buffered write.
+		 */
+		if (written) {
+			if (written == -EBUSY)
+				return 0;
 			goto out;
+		}
 	}
 
 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
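
Mapping -EBUSY to 0 works because of the contract with the caller: a short direct write is completed through the page cache rather than reported as an error. A condensed sketch of that caller-side fallback, based on the assumed shape of __generic_file_aio_write_nolock() at this point in the tree (identifiers and signatures reconstructed, not verbatim source):

	/* Assumed caller-side shape; not verbatim mm/filemap.c. */
	written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
					    ppos, count, ocount);
	/*
	 * A negative return is a hard error; written == count means the
	 * direct path wrote everything. Anything short - now including
	 * the 0 returned when a page could not be invalidated - is
	 * finished through the buffered path below.
	 */
	if (written < 0 || written == count)
		goto out;
	pos += written;
	count -= written;
	written_buffered = generic_file_buffered_write(iocb, iov, nr_segs,
					pos, ppos, count, written);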