Diffstat (limited to 'mm/filemap.c')

 mm/filemap.c | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 54e968650855..494ff20b6cfa 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1100,8 +1100,9 @@ page_ok:
 
 page_not_up_to_date:
 		/* Get exclusive access to the page ... */
-		if (lock_page_killable(page))
-			goto readpage_eio;
+		error = lock_page_killable(page);
+		if (unlikely(error))
+			goto readpage_error;
 
 page_not_up_to_date_locked:
 		/* Did it get truncated before we got the lock? */
@@ -1130,8 +1131,9 @@ readpage:
 		}
 
 		if (!PageUptodate(page)) {
-			if (lock_page_killable(page))
-				goto readpage_eio;
+			error = lock_page_killable(page);
+			if (unlikely(error))
+				goto readpage_error;
 			if (!PageUptodate(page)) {
 				if (page->mapping == NULL) {
 					/*
@@ -1143,15 +1145,14 @@ readpage:
 				}
 				unlock_page(page);
 				shrink_readahead_size_eio(filp, ra);
-				goto readpage_eio;
+				error = -EIO;
+				goto readpage_error;
 			}
 			unlock_page(page);
 		}
 
 		goto page_ok;
 
-readpage_eio:
-		error = -EIO;
 readpage_error:
 		/* UHHUH! A synchronous read error occurred. Report it */
 		desc->error = error;
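
The three hunks above fold the old readpage_eio label into the common
readpage_error path: lock_page_killable() now hands back its own return value
(for example -EINTR when the waiting task is fatally signalled), and only the
genuine read failure after shrink_readahead_size_eio() is still converted to
-EIO. A minimal userspace sketch of the same single-error-label idiom follows;
try_lock(), do_read() and read_path() are illustrative stand-ins, not kernel
APIs.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-ins for the kernel helpers; not real kernel APIs. */
static int try_lock(int fail) { return fail ? -EINTR : 0; }
static int do_read(int fail)  { return fail ? -EIO : 0; }

static int read_path(int lock_fails, int read_fails)
{
	int error;

	error = try_lock(lock_fails);
	if (error)
		goto out_error;		/* propagate -EINTR as-is */

	if (do_read(read_fails)) {
		error = -EIO;		/* only a real read failure becomes -EIO */
		goto out_error;
	}
	return 0;

out_error:
	/* single error label: every failure site picked its own errno */
	return error;
}

int main(void)
{
	printf("lock interrupted: %d\n", read_path(1, 0));	/* -EINTR */
	printf("read failed:      %d\n", read_path(0, 1));	/* -EIO */
	return 0;
}

The point of the pattern is that each failure site chooses its errno before
jumping, so one label can report all of them without flattening a fatal-signal
interruption into -EIO.
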
@@ -2129,13 +2130,20 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * After a write we want buffered reads to be sure to go to disk to get
 	 * the new data. We invalidate clean cached page from the region we're
 	 * about to write. We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 * without clobbering -EIOCBQUEUED from ->direct_IO().
 	 */
 	if (mapping->nrpages) {
 		written = invalidate_inode_pages2_range(mapping,
 					pos >> PAGE_CACHE_SHIFT, end);
-		if (written)
+		/*
+		 * If a page can not be invalidated, return 0 to fall back
+		 * to buffered write.
+		 */
+		if (written) {
+			if (written == -EBUSY)
+				return 0;
 			goto out;
+		}
 	}
 
 	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
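
The last hunk changes how generic_file_direct_write() reacts when
invalidate_inode_pages2_range() cannot evict a cached page: instead of failing
the whole write with -EBUSY, it now returns 0 so the caller can retry through
the buffered write path. A small self-contained sketch of that caller-side
fallback follows; direct_write(), buffered_write() and do_write() are
hypothetical stand-ins for the real VFS entry points.

#include <stdio.h>
#include <errno.h>

/* Hypothetical stand-in mirroring the patched generic_file_direct_write():
 * a failed cache invalidation (-EBUSY) yields 0 instead of an error. */
static long direct_write(int cache_busy)
{
	long invalidated = cache_busy ? -EBUSY : 0;

	if (invalidated) {
		if (invalidated == -EBUSY)
			return 0;	/* ask the caller to fall back */
		return invalidated;	/* any other error stays fatal */
	}
	return 4096;			/* pretend direct IO wrote one page */
}

static long buffered_write(void)
{
	return 4096;			/* pretend the page-cache path wrote it */
}

static long do_write(int cache_busy)
{
	long written = direct_write(cache_busy);

	/* 0 bytes from the direct path means "retry buffered". */
	if (written == 0)
		written = buffered_write();
	return written;
}

int main(void)
{
	printf("clean cache: %ld bytes via direct IO\n", do_write(0));
	printf("busy page:   %ld bytes via buffered fallback\n", do_write(1));
	return 0;
}

Returning 0 rather than the error is what enables the fallback: the caller
treats "nothing written, no error" as the cue to take the buffered path.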