Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  391
1 file changed, 213 insertions(+), 178 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74dc..99628508cb11 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,8 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
+#include "xfs_bmap.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
@@ -76,7 +78,7 @@ xfs_ioend_wake(
 	wake_up(to_ioend_wq(ip));
 }
 
-STATIC void
+void
 xfs_count_page_state(
 	struct page *page,
 	int *delalloc,
@@ -98,48 +100,6 @@ xfs_count_page_state(
 	} while ((bh = bh->b_this_page) != head);
 }
 
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
-	int tag,
-	struct inode *inode,
-	struct page *page,
-	unsigned long pgoff)
-{
-	xfs_inode_t *ip;
-	loff_t isize = i_size_read(inode);
-	loff_t offset = page_offset(page);
-	int delalloc = -1, unmapped = -1, unwritten = -1;
-
-	if (page_has_buffers(page))
-		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
-	ip = XFS_I(inode);
-	if (!ip->i_rwtrace)
-		return;
-
-	ktrace_enter(ip->i_rwtrace,
-		(void *)((unsigned long)tag),
-		(void *)ip,
-		(void *)inode,
-		(void *)page,
-		(void *)pgoff,
-		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
-		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(isize & 0xffffffff)),
-		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(offset & 0xffffffff)),
-		(void *)((unsigned long)delalloc),
-		(void *)((unsigned long)unmapped),
-		(void *)((unsigned long)unwritten),
-		(void *)((unsigned long)current_pid()),
-		(void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
 STATIC struct block_device *
 xfs_find_bdev_for_inode(
 	struct xfs_inode *ip)
@@ -204,14 +164,17 @@ xfs_ioend_new_eof(
 }
 
 /*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size. If a write is beyond
- * eof i_new_size will be the intended file size until i_size is
- * updated. If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
+ * Update on-disk file size now that data has been written to disk. The
+ * current in-memory file size is i_size. If a write is beyond eof i_new_size
+ * will be the intended file size until i_size is updated. If this write does
+ * not extend all the way to the valid file size then restrict this update to
+ * the end of the write.
+ *
+ * This function does not block as blocking on the inode lock in IO completion
+ * can lead to IO completion order dependency deadlocks. If it can't get the
+ * inode ilock it will return EAGAIN. Callers must handle this.
  */
-
-STATIC void
+STATIC int
 xfs_setfilesize(
 	xfs_ioend_t *ioend)
 {
@@ -222,85 +185,19 @@ xfs_setfilesize(
 	ASSERT(ioend->io_type != IOMAP_READ);
 
 	if (unlikely(ioend->io_error))
-		return;
+		return 0;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
+		return EAGAIN;
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	isize = xfs_ioend_new_eof(ioend);
 	if (isize) {
 		ip->i_d.di_size = isize;
-		xfs_mark_inode_dirty_sync(ip);
+		xfs_mark_inode_dirty(ip);
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-}
-
-/*
- * Buffered IO write completion for delayed allocate extents.
- */
-STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct *work)
-{
-	xfs_ioend_t *ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct *work)
-{
-	xfs_ioend_t *ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
-	struct work_struct *work)
-{
-	xfs_ioend_t *ioend =
-		container_of(work, xfs_ioend_t, io_work);
-	struct xfs_inode *ip = XFS_I(ioend->io_inode);
-	xfs_off_t offset = ioend->io_offset;
-	size_t size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct *work)
-{
-	xfs_ioend_t *ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_destroy_ioend(ioend);
+	return 0;
 }
 
 /*
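
The reworked xfs_setfilesize() is the core of this change: I/O completion must not sleep on the inode lock, since completions can have ordering dependencies on one another and a blocked completion can deadlock the whole queue. A minimal userspace sketch of the same trylock-or-EAGAIN contract, assuming illustrative names (update_disk_size and the globals are stand-ins, not kernel API):

	#include <errno.h>
	#include <pthread.h>

	/* Illustrative stand-ins for the XFS inode lock and on-disk size. */
	static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
	static long long di_size;

	/*
	 * Same contract as the reworked xfs_setfilesize(): try the lock, and
	 * on contention return EAGAIN instead of sleeping, so the caller can
	 * requeue the completion and keep servicing other items.
	 */
	static int update_disk_size(long long new_size)
	{
		if (pthread_mutex_trylock(&ilock) != 0)
			return EAGAIN;	/* contended: caller must retry later */

		if (new_size > di_size)
			di_size = new_size;	/* file sizes only ever grow here */

		pthread_mutex_unlock(&ilock);
		return 0;
	}

A caller that gets EAGAIN is expected to requeue the completion and retry, which is exactly what the new xfs_end_io() handler below does.
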
@@ -314,10 +211,10 @@ xfs_finish_ioend(
 	int wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -325,6 +222,53 @@
 }
 
 /*
+ * IO write completion.
+ */
+STATIC void
+xfs_end_io(
+	struct work_struct *work)
+{
+	xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
+	struct xfs_inode *ip = XFS_I(ioend->io_inode);
+	int error = 0;
+
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extents after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						  ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ) {
+		error = xfs_setfilesize(ioend);
+		ASSERT(!error || error == EAGAIN);
+	}
+
+	/*
+	 * If we didn't complete processing of the ioend, requeue it to the
+	 * tail of the workqueue for another attempt later. Otherwise destroy
+	 * it.
+	 */
+	if (error == EAGAIN) {
+		atomic_inc(&ioend->io_remaining);
+		xfs_finish_ioend(ioend, 0);
+		/* ensure we don't spin on blocked ioends */
+		delay(1);
+	} else
+		xfs_destroy_ioend(ioend);
+}
+
+/*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
  * We'll need to extend this for updating the ondisk inode size later
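
xfs_end_io() above folds the four per-type completion handlers into one, dispatching on ioend->io_type, and handles the EAGAIN return from the non-blocking xfs_setfilesize() by requeueing. A self-contained sketch of that requeue-on-EAGAIN shape, with a hypothetical FIFO and a stubbed try_setfilesize() standing in for the workqueue and the real size update:

	#include <errno.h>
	#include <stddef.h>

	struct ioend {
		struct ioend *next;
		int tries;
	};

	static struct ioend *head;
	static struct ioend **tail = &head;

	/* Append to the tail, as requeueing via queue_work() effectively does. */
	static void requeue(struct ioend *io)
	{
		io->next = NULL;
		*tail = io;
		tail = &io->next;
	}

	/* Stub for xfs_setfilesize(): pretend the ilock is contended once. */
	static int try_setfilesize(struct ioend *io)
	{
		return io->tries++ == 0 ? EAGAIN : 0;
	}

	/*
	 * Drain loop with the same shape as xfs_end_io(): an ioend that
	 * cannot take the lock goes back on the tail so later completions
	 * still make progress, instead of blocking the queue behind one lock.
	 */
	static void drain(void)
	{
		while (head) {
			struct ioend *io = head;

			head = io->next;
			if (!head)
				tail = &head;

			if (try_setfilesize(io) == EAGAIN)
				requeue(io);	/* retry later; never sleep here */
			/* else: destroy the ioend */
		}
	}
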
@@ -355,15 +299,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -380,7 +316,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t *iomapp,
 	loff_t offset)
@@ -412,8 +348,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t *ioend,
-	struct bio *bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t *ioend,
+	struct bio *bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -424,9 +361,10 @@ xfs_submit_ioend_bio(
 	 * but don't update the inode size until I/O completion.
 	 */
 	if (xfs_ioend_new_eof(ioend))
-		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
+		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
@@ -505,6 +443,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t *ioend)
 {
 	xfs_ioend_t *head = ioend;
@@ -533,19 +472,19 @@ xfs_submit_ioend(
 retry:
 			bio = xfs_alloc_ioend_bio(bh);
 		} else if (bh->b_blocknr != lastblock + 1) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		if (bio_add_buffer(bio, bh) != bh->b_size) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		lastblock = bh->b_blocknr;
 		}
 		if (bio)
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 		xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -904,16 +843,9 @@ xfs_convert_page( | |||
904 | 843 | ||
905 | if (startio) { | 844 | if (startio) { |
906 | if (count) { | 845 | if (count) { |
907 | struct backing_dev_info *bdi; | ||
908 | |||
909 | bdi = inode->i_mapping->backing_dev_info; | ||
910 | wbc->nr_to_write--; | 846 | wbc->nr_to_write--; |
911 | if (bdi_write_congested(bdi)) { | 847 | if (wbc->nr_to_write <= 0) |
912 | wbc->encountered_congestion = 1; | ||
913 | done = 1; | ||
914 | } else if (wbc->nr_to_write <= 0) { | ||
915 | done = 1; | 848 | done = 1; |
916 | } | ||
917 | } | 849 | } |
918 | xfs_start_page_writeback(page, !page_dirty, count); | 850 | xfs_start_page_writeback(page, !page_dirty, count); |
919 | } | 851 | } |
@@ -962,6 +894,125 @@ xfs_cluster_write(
 	}
 }
 
+STATIC void
+xfs_vm_invalidatepage(
+	struct page *page,
+	unsigned long offset)
+{
+	trace_xfs_invalidatepage(page->mapping->host, page, offset);
+	block_invalidatepage(page, offset);
+}
+
+/*
+ * If the page has delalloc buffers on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
+ * is done on that same region - the delalloc extent is returned when none is
+ * supposed to be there.
+ *
+ * We prevent this by truncating away the delalloc regions on the page before
+ * invalidating it. Because they are delalloc, we can do this without needing a
+ * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
+ * truncation without a transaction as there is no space left for block
+ * reservation (typically why we see an ENOSPC in writeback).
+ *
+ * This is not a performance critical path, so for now just do the punching a
+ * buffer head at a time.
+ */
+STATIC void
+xfs_aops_discard_page(
+	struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct xfs_inode *ip = XFS_I(inode);
+	struct buffer_head *bh, *head;
+	loff_t offset = page_offset(page);
+	ssize_t len = 1 << inode->i_blkbits;
+
+	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+		goto out_invalidate;
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
+	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+		"page discard on page %p, inode 0x%llx, offset %llu.",
+			page, ip->i_ino, offset);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	bh = head = page_buffers(page);
+	do {
+		int done;
+		xfs_fileoff_t offset_fsb;
+		xfs_bmbt_irec_t imap;
+		int nimaps = 1;
+		int error;
+		xfs_fsblock_t firstblock;
+		xfs_bmap_free_t flist;
+
+		if (!buffer_delay(bh))
+			goto next_buffer;
+
+		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+
+		/*
+		 * Map the range first and check that it is a delalloc extent
+		 * before trying to unmap the range. Otherwise we will be
+		 * trying to remove a real extent (which requires a
+		 * transaction) or a hole, which is probably a bad idea...
+		 */
+		error = xfs_bmapi(NULL, ip, offset_fsb, 1,
+				XFS_BMAPI_ENTIRE, NULL, 0, &imap,
+				&nimaps, NULL, NULL);
+
+		if (error) {
+			/* something screwed, just bail */
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				"page discard failed delalloc mapping lookup.");
+			}
+			break;
+		}
+		if (!nimaps) {
+			/* nothing there */
+			goto next_buffer;
+		}
+		if (imap.br_startblock != DELAYSTARTBLOCK) {
+			/* been converted, ignore */
+			goto next_buffer;
+		}
+		WARN_ON(imap.br_blockcount == 0);
+
+		/*
+		 * Note: while we initialise the firstblock/flist pair, they
+		 * should never be used because blocks should never be
+		 * allocated or freed for a delalloc extent and hence we don't
+		 * need to cancel or finish them after the xfs_bunmapi() call.
+		 */
+		xfs_bmap_init(&flist, &firstblock);
+		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
+					&flist, NULL, &done);
+
+		ASSERT(!flist.xbf_count && !flist.xbf_first);
+		if (error) {
+			/* something screwed, just bail */
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard unable to remove delalloc mapping.");
+			}
+			break;
+		}
+next_buffer:
+		offset += len;
+
+	} while ((bh = bh->b_this_page) != head);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_invalidate:
+	xfs_vm_invalidatepage(page, 0);
+	return;
+}
+
 /*
  * Calling this without startio set means we are being asked to make a dirty
  * page ready for freeing its buffers. When called with startio set then
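
The comment above xfs_aops_discard_page() carries the key reasoning: a failed writeback must punch out its delalloc extents before the page is invalidated, and each block must be mapped and verified as delalloc first, because punching a real extent or a hole would need a transaction that an ENOSPC writeback cannot start. A toy sketch of that map-then-check-then-punch loop; DELAYSTARTBLOCK mirrors the XFS sentinel in spirit, while map_one_block() and the block layout are invented for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define DELAYSTARTBLOCK	(-2LL)	/* "delayed allocation" sentinel */

	struct extent {
		long long startblock;
		long long blockcount;
	};

	/* Toy mapper standing in for xfs_bmapi(): odd blocks are holes. */
	static bool map_one_block(long long fsb, struct extent *out)
	{
		if (fsb & 1)
			return false;
		out->startblock = DELAYSTARTBLOCK;
		out->blockcount = 1;
		return true;
	}

	/*
	 * Same shape as the loop in xfs_aops_discard_page(): map each block,
	 * verify it is still delalloc, and only then punch it. Punching a
	 * real extent or a hole would need a transaction we may not be able
	 * to start (ENOSPC is the usual reason we are here at all).
	 */
	static void punch_delalloc(long long first_fsb, int nr_blocks)
	{
		for (int i = 0; i < nr_blocks; i++) {
			struct extent ext;

			if (!map_one_block(first_fsb + i, &ext))
				continue;	/* hole: nothing to punch */
			if (ext.startblock != DELAYSTARTBLOCK)
				continue;	/* already converted to real blocks */
			printf("punch fsb %lld\n", first_fsb + i);
		}
	}
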
@@ -1198,7 +1249,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
@@ -1213,7 +1264,7 @@ error:
 	 */
 	if (err != -EAGAIN) {
 		if (!unmapped)
-			block_invalidatepage(page, 0);
+			xfs_aops_discard_page(page);
 		ClearPageUptodate(page);
 	}
 	return err;
@@ -1249,7 +1300,7 @@ xfs_vm_writepage(
 	int delalloc, unmapped, unwritten;
 	struct inode *inode = page->mapping->host;
 
-	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+	trace_xfs_writepage(inode, page, 0);
 
 	/*
 	 * We need a transaction if:
@@ -1354,7 +1405,7 @@ xfs_vm_releasepage(
 		.nr_to_write = 1,
 	};
 
-	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+	trace_xfs_releasepage(inode, page, 0);
 
 	if (!page_has_buffers(page))
 		return 0;
@@ -1535,7 +1586,7 @@ xfs_end_io_direct(
 	 * didn't map an unwritten extent so switch its completion
 	 * handler.
 	 */
-	INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+	ioend->io_type = IOMAP_NEW;
 	xfs_finish_ioend(ioend, 0);
 }
 
@@ -1562,19 +1613,13 @@ xfs_vm_direct_IO(
 
 	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
-	if (rw == WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	} else {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	}
+	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
+					IOMAP_UNWRITTEN : IOMAP_READ);
+
+	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+					    offset, nr_segs,
+					    xfs_get_blocks_direct,
+					    xfs_end_io_direct);
 
 	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
 		xfs_destroy_ioend(iocb->private);
@@ -1629,16 +1674,6 @@ xfs_vm_readpages(
 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
-STATIC void
-xfs_vm_invalidatepage(
-	struct page *page,
-	unsigned long offset)
-{
-	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
-		page->mapping->host, page, offset);
-	block_invalidatepage(page, offset);
-}
-
 const struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,