Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
 -rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 392
 1 file changed, 214 insertions(+), 178 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c2e30eea74dc..0f8b9968a803 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,6 +38,9 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include "xfs_vnodeops.h"
+#include "xfs_trace.h"
+#include "xfs_bmap.h"
+#include <linux/gfp.h>
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
@@ -76,7 +79,7 @@ xfs_ioend_wake(
 		wake_up(to_ioend_wq(ip));
 }
 
-STATIC void
+void
 xfs_count_page_state(
 	struct page		*page,
 	int			*delalloc,
@@ -98,48 +101,6 @@ xfs_count_page_state(
 	} while ((bh = bh->b_this_page) != head);
 }
 
-#if defined(XFS_RW_TRACE)
-void
-xfs_page_trace(
-	int		tag,
-	struct inode	*inode,
-	struct page	*page,
-	unsigned long	pgoff)
-{
-	xfs_inode_t	*ip;
-	loff_t		isize = i_size_read(inode);
-	loff_t		offset = page_offset(page);
-	int		delalloc = -1, unmapped = -1, unwritten = -1;
-
-	if (page_has_buffers(page))
-		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
-
-	ip = XFS_I(inode);
-	if (!ip->i_rwtrace)
-		return;
-
-	ktrace_enter(ip->i_rwtrace,
-		(void *)((unsigned long)tag),
-		(void *)ip,
-		(void *)inode,
-		(void *)page,
-		(void *)pgoff,
-		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
-		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(isize & 0xffffffff)),
-		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
-		(void *)((unsigned long)(offset & 0xffffffff)),
-		(void *)((unsigned long)delalloc),
-		(void *)((unsigned long)unmapped),
-		(void *)((unsigned long)unwritten),
-		(void *)((unsigned long)current_pid()),
-		(void *)NULL);
-}
-#else
-#define xfs_page_trace(tag, inode, page, pgoff)
-#endif
-
 STATIC struct block_device *
 xfs_find_bdev_for_inode(
 	struct xfs_inode	*ip)
@@ -204,14 +165,17 @@ xfs_ioend_new_eof(
 }
 
 /*
- * Update on-disk file size now that data has been written to disk.
- * The current in-memory file size is i_size.  If a write is beyond
- * eof i_new_size will be the intended file size until i_size is
- * updated.  If this write does not extend all the way to the valid
- * file size then restrict this update to the end of the write.
+ * Update on-disk file size now that data has been written to disk.  The
+ * current in-memory file size is i_size.  If a write is beyond eof i_new_size
+ * will be the intended file size until i_size is updated.  If this write does
+ * not extend all the way to the valid file size then restrict this update to
+ * the end of the write.
+ *
+ * This function does not block as blocking on the inode lock in IO completion
+ * can lead to IO completion order dependency deadlocks. If it can't get the
+ * inode ilock it will return EAGAIN. Callers must handle this.
  */
-
-STATIC void
+STATIC int
 xfs_setfilesize(
 	xfs_ioend_t		*ioend)
 {
@@ -222,85 +186,19 @@ xfs_setfilesize(
 	ASSERT(ioend->io_type != IOMAP_READ);
 
 	if (unlikely(ioend->io_error))
-		return;
+		return 0;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
+		return EAGAIN;
 
-	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	isize = xfs_ioend_new_eof(ioend);
 	if (isize) {
 		ip->i_d.di_size = isize;
-		xfs_mark_inode_dirty_sync(ip);
+		xfs_mark_inode_dirty(ip);
 	}
 
 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-}
-
-/*
- * Buffered IO write completion for delayed allocate extents.
- */
-STATIC void
-xfs_end_bio_delalloc(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * Buffered IO write completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_written(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_setfilesize(ioend);
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO write completion for unwritten extents.
- *
- * Issue transactions to convert a buffer range from unwritten
- * to written extents.
- */
-STATIC void
-xfs_end_bio_unwritten(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	xfs_off_t		offset = ioend->io_offset;
-	size_t			size = ioend->io_size;
-
-	if (likely(!ioend->io_error)) {
-		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-			int error;
-			error = xfs_iomap_write_unwritten(ip, offset, size);
-			if (error)
-				ioend->io_error = error;
-		}
-		xfs_setfilesize(ioend);
-	}
-	xfs_destroy_ioend(ioend);
-}
-
-/*
- * IO read completion for regular, written extents.
- */
-STATIC void
-xfs_end_bio_read(
-	struct work_struct	*work)
-{
-	xfs_ioend_t		*ioend =
-		container_of(work, xfs_ioend_t, io_work);
-
-	xfs_destroy_ioend(ioend);
+	return 0;
 }
 
 /*
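The EAGAIN return above is the non-blocking trylock idiom: I/O completion must never sleep on the ilock, because completion-order dependencies can deadlock. As a hedged illustration of the shape only (hypothetical my_* names, not the XFS helpers in this patch):

/* Sketch: trylock-or-EAGAIN in a completion handler (hypothetical names). */
static int my_setfilesize(struct my_ioend *ioend)
{
	struct my_inode	*ip = ioend->inode;

	if (ioend->error)
		return 0;

	/* Never sleep here: blocking completion work can deadlock
	 * against the I/O that would eventually release the lock. */
	if (!my_trylock_excl(&ip->ilock))
		return EAGAIN;		/* caller requeues and retries */

	if (ioend->new_size > ip->ondisk_size) {
		ip->ondisk_size = ioend->new_size;
		my_mark_inode_dirty(ip);
	}
	my_unlock_excl(&ip->ilock);
	return 0;
}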
@@ -314,10 +212,10 @@ xfs_finish_ioend(
 	int		wait)
 {
 	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		struct workqueue_struct *wq = xfsdatad_workqueue;
-		if (ioend->io_work.func == xfs_end_bio_unwritten)
-			wq = xfsconvertd_workqueue;
+		struct workqueue_struct *wq;
 
+		wq = (ioend->io_type == IOMAP_UNWRITTEN) ?
+			xfsconvertd_workqueue : xfsdatad_workqueue;
 		queue_work(wq, &ioend->io_work);
 		if (wait)
 			flush_workqueue(wq);
@@ -325,6 +223,53 @@ xfs_finish_ioend(
 }
 
 /*
+ * IO write completion.
+ */
+STATIC void
+xfs_end_io(
+	struct work_struct	*work)
+{
+	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	int			error = 0;
+
+	/*
+	 * For unwritten extents we need to issue transactions to convert a
+	 * range to normal written extents after the data I/O has finished.
+	 */
+	if (ioend->io_type == IOMAP_UNWRITTEN &&
+	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
+
+		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
+						 ioend->io_size);
+		if (error)
+			ioend->io_error = error;
+	}
+
+	/*
+	 * We might have to update the on-disk file size after extending
+	 * writes.
+	 */
+	if (ioend->io_type != IOMAP_READ) {
+		error = xfs_setfilesize(ioend);
+		ASSERT(!error || error == EAGAIN);
+	}
+
+	/*
+	 * If we didn't complete processing of the ioend, requeue it to the
+	 * tail of the workqueue for another attempt later. Otherwise destroy
+	 * it.
+	 */
+	if (error == EAGAIN) {
+		atomic_inc(&ioend->io_remaining);
+		xfs_finish_ioend(ioend, 0);
+		/* ensure we don't spin on blocked ioends */
+		delay(1);
+	} else
+		xfs_destroy_ioend(ioend);
+}
+
+/*
  * Allocate and initialise an IO completion structure.
  * We need to track unwritten extent write completion here initially.
  * We'll need to extend this for updating the ondisk inode size later
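When xfs_setfilesize() returns EAGAIN, the handler above takes a fresh reference and re-queues itself rather than blocking, and delay(1) keeps a held ilock from turning the workqueue into a busy loop. The self-requeueing idiom in isolation, as a sketch (hypothetical names; msleep() stands in for XFS's delay()):

/* Sketch: a work item that requeues itself when it cannot make progress. */
static void my_end_io(struct work_struct *work)
{
	struct my_ioend *ioend = container_of(work, struct my_ioend, work);

	if (my_setfilesize(ioend) == EAGAIN) {
		atomic_inc(&ioend->remaining);	 /* keep the ioend alive */
		queue_work(my_wq, &ioend->work); /* retry from queue tail */
		msleep(1);			 /* back off, don't spin */
		return;
	}
	my_destroy_ioend(ioend);
}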
@@ -355,15 +300,7 @@ xfs_alloc_ioend(
 	ioend->io_offset = 0;
 	ioend->io_size = 0;
 
-	if (type == IOMAP_UNWRITTEN)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten);
-	else if (type == IOMAP_DELAY)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc);
-	else if (type == IOMAP_READ)
-		INIT_WORK(&ioend->io_work, xfs_end_bio_read);
-	else
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-
+	INIT_WORK(&ioend->io_work, xfs_end_io);
 	return ioend;
 }
 
@@ -380,7 +317,7 @@ xfs_map_blocks(
 	return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps);
 }
 
-STATIC_INLINE int
+STATIC int
 xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
 	loff_t			offset)
@@ -412,8 +349,9 @@ xfs_end_bio(
 
 STATIC void
 xfs_submit_ioend_bio(
-	xfs_ioend_t	*ioend,
-	struct bio	*bio)
+	struct writeback_control *wbc,
+	xfs_ioend_t		*ioend,
+	struct bio		*bio)
 {
 	atomic_inc(&ioend->io_remaining);
 	bio->bi_private = ioend;
@@ -424,9 +362,10 @@ xfs_submit_ioend_bio(
 	 * but don't update the inode size until I/O completion.
 	 */
 	if (xfs_ioend_new_eof(ioend))
-		xfs_mark_inode_dirty_sync(XFS_I(ioend->io_inode));
+		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
 
-	submit_bio(WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
+		   WRITE_SYNC_PLUG : WRITE, bio);
 	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
 	bio_put(bio);
 }
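Each bio submitted for an ioend takes a reference via io_remaining; the bio's end_io callback and the submitter each drop one through xfs_finish_ioend(), so the completion work fires exactly once, after the last bio finishes. The reference pattern reduced to a sketch (hypothetical names):

/* Sketch: per-bio references against one shared completion object. */
static void my_submit_one(struct my_ioend *ioend, struct bio *bio)
{
	atomic_inc(&ioend->remaining);	/* dropped in the bio's end_io */
	bio->bi_private = ioend;
	submit_bio(WRITE, bio);
}

static void my_all_submitted(struct my_ioend *ioend)
{
	/* Drop the submitter's initial reference; if every bio already
	 * completed, this is the final put and the work is queued now. */
	if (atomic_dec_and_test(&ioend->remaining))
		queue_work(my_wq, &ioend->work);
}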
@@ -505,6 +444,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  */
 STATIC void
 xfs_submit_ioend(
+	struct writeback_control *wbc,
 	xfs_ioend_t		*ioend)
 {
 	xfs_ioend_t		*head = ioend;
@@ -533,19 +473,19 @@ xfs_submit_ioend(
 retry:
 			bio = xfs_alloc_ioend_bio(bh);
 		} else if (bh->b_blocknr != lastblock + 1) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		if (bio_add_buffer(bio, bh) != bh->b_size) {
-			xfs_submit_ioend_bio(ioend, bio);
+			xfs_submit_ioend_bio(wbc, ioend, bio);
 			goto retry;
 		}
 
 		lastblock = bh->b_blocknr;
 	}
 	if (bio)
-		xfs_submit_ioend_bio(ioend, bio);
+		xfs_submit_ioend_bio(wbc, ioend, bio);
 	xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
@@ -904,16 +844,9 @@ xfs_convert_page(
 
 	if (startio) {
 		if (count) {
-			struct backing_dev_info *bdi;
-
-			bdi = inode->i_mapping->backing_dev_info;
 			wbc->nr_to_write--;
-			if (bdi_write_congested(bdi)) {
-				wbc->encountered_congestion = 1;
-				done = 1;
-			} else if (wbc->nr_to_write <= 0) {
+			if (wbc->nr_to_write <= 0)
 				done = 1;
-			}
 		}
 		xfs_start_page_writeback(page, !page_dirty, count);
 	}
@@ -962,6 +895,125 @@ xfs_cluster_write(
 	}
 }
 
+STATIC void
+xfs_vm_invalidatepage(
+	struct page		*page,
+	unsigned long		offset)
+{
+	trace_xfs_invalidatepage(page->mapping->host, page, offset);
+	block_invalidatepage(page, offset);
+}
+
+/*
+ * If the page has delalloc buffers on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
+ * is done on that same region - the delalloc extent is returned when none is
+ * supposed to be there.
+ *
+ * We prevent this by truncating away the delalloc regions on the page before
+ * invalidating it. Because they are delalloc, we can do this without needing a
+ * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
+ * truncation without a transaction as there is no space left for block
+ * reservation (typically why we see an ENOSPC in writeback).
+ *
+ * This is not a performance critical path, so for now just do the punching a
+ * buffer head at a time.
+ */
+STATIC void
+xfs_aops_discard_page(
+	struct page		*page)
+{
+	struct inode		*inode = page->mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct buffer_head	*bh, *head;
+	loff_t			offset = page_offset(page);
+	ssize_t			len = 1 << inode->i_blkbits;
+
+	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+		goto out_invalidate;
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		goto out_invalidate;
+
+	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+		"page discard on page %p, inode 0x%llx, offset %llu.",
+			page, ip->i_ino, offset);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	bh = head = page_buffers(page);
+	do {
+		int		done;
+		xfs_fileoff_t	offset_fsb;
+		xfs_bmbt_irec_t	imap;
+		int		nimaps = 1;
+		int		error;
+		xfs_fsblock_t	firstblock;
+		xfs_bmap_free_t	flist;
+
+		if (!buffer_delay(bh))
+			goto next_buffer;
+
+		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+
+		/*
+		 * Map the range first and check that it is a delalloc extent
+		 * before trying to unmap the range. Otherwise we will be
+		 * trying to remove a real extent (which requires a
+		 * transaction) or a hole, which is probably a bad idea...
+		 */
+		error = xfs_bmapi(NULL, ip, offset_fsb, 1,
+				XFS_BMAPI_ENTIRE, NULL, 0, &imap,
+				&nimaps, NULL, NULL);
+
+		if (error) {
+			/* something screwed, just bail */
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+				"page discard failed delalloc mapping lookup.");
+			}
+			break;
+		}
+		if (!nimaps) {
+			/* nothing there */
+			goto next_buffer;
+		}
+		if (imap.br_startblock != DELAYSTARTBLOCK) {
+			/* been converted, ignore */
+			goto next_buffer;
+		}
+		WARN_ON(imap.br_blockcount == 0);
+
+		/*
+		 * Note: while we initialise the firstblock/flist pair, they
+		 * should never be used because blocks should never be
+		 * allocated or freed for a delalloc extent and hence we don't
+		 * need to cancel or finish them after the xfs_bunmapi() call.
+		 */
+		xfs_bmap_init(&flist, &firstblock);
+		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
+					&flist, NULL, &done);
+
+		ASSERT(!flist.xbf_count && !flist.xbf_first);
+		if (error) {
+			/* something screwed, just bail */
+			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+				xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard unable to remove delalloc mapping.");
+			}
+			break;
+		}
+next_buffer:
+		offset += len;
+
+	} while ((bh = bh->b_this_page) != head);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_invalidate:
+	xfs_vm_invalidatepage(page, 0);
+	return;
+}
+
 /*
  * Calling this without startio set means we are being asked to make a dirty
  * page ready for freeing its buffers. When called with startio set then
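The map-then-punch order in the loop above is what makes the transactionless xfs_bunmapi() safe: a block is unmapped only if it still reads back as DELAYSTARTBLOCK, i.e. delalloc with no real blocks behind it. One iteration, reduced to a sketch with hypothetical wrappers around the xfs_bmapi()/xfs_bunmapi() calls shown above:

/* Sketch: probe one FS block and punch it only if it is still delalloc. */
static int my_punch_if_delalloc(struct my_inode *ip, u64 offset_fsb)
{
	struct my_extent rec;

	if (my_map_one_block(ip, offset_fsb, &rec))
		return -1;		/* lookup failed, bail out */
	if (!rec.mapped || rec.startblock != MY_DELAYSTARTBLOCK)
		return 0;		/* hole or real extent, skip */

	/* A delalloc extent has no allocated blocks to free, so the
	 * unmap needs neither a transaction nor a block reservation. */
	return my_unmap_one_block(ip, offset_fsb);
}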
@@ -1198,7 +1250,7 @@ xfs_page_state_convert(
 	}
 
 	if (iohead)
-		xfs_submit_ioend(iohead);
+		xfs_submit_ioend(wbc, iohead);
 
 	return page_dirty;
 
@@ -1213,7 +1265,7 @@ error:
 	 */
 	if (err != -EAGAIN) {
 		if (!unmapped)
-			block_invalidatepage(page, 0);
+			xfs_aops_discard_page(page);
 		ClearPageUptodate(page);
 	}
 	return err;
@@ -1249,7 +1301,7 @@ xfs_vm_writepage(
 	int			delalloc, unmapped, unwritten;
 	struct inode		*inode = page->mapping->host;
 
-	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+	trace_xfs_writepage(inode, page, 0);
 
 	/*
 	 * We need a transaction if:
@@ -1354,7 +1406,7 @@ xfs_vm_releasepage(
 		.nr_to_write = 1,
 	};
 
-	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, 0);
+	trace_xfs_releasepage(inode, page, 0);
 
 	if (!page_has_buffers(page))
 		return 0;
@@ -1535,7 +1587,7 @@ xfs_end_io_direct(
 		 * didn't map an unwritten extent so switch its completion
 		 * handler.
 		 */
-		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
+		ioend->io_type = IOMAP_NEW;
 		xfs_finish_ioend(ioend, 0);
 	}
 
@@ -1562,19 +1614,13 @@ xfs_vm_direct_IO(
 
 	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
-	if (rw == WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	} else {
-		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
-	}
+	iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
+					IOMAP_UNWRITTEN : IOMAP_READ);
+
+	ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
+					    offset, nr_segs,
+					    xfs_get_blocks_direct,
+					    xfs_end_io_direct);
 
 	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
 		xfs_destroy_ioend(iocb->private);
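With completion dispatch keyed off io_type, reads and writes now share one submission path; only the ioend type differs, and any synchronous return (anything other than -EIOCBQUEUED) means the completion handler never ran, so the ioend must be freed here. The resulting shape as a fragment sketch (hypothetical names):

/* Sketch: one direct I/O path, completion context typed by direction. */
iocb->private = my_alloc_ioend(inode, rw == WRITE ? MY_UNWRITTEN : MY_READ);

ret = my_blockdev_direct_io(rw, iocb, inode, bdev, iov, offset,
			    nr_segs, my_get_blocks, my_end_io_direct);

/* -EIOCBQUEUED: the async completion path now owns iocb->private.
 * Anything else: completion already happened (or failed) synchronously,
 * so the context is still ours to destroy. */
if (ret != -EIOCBQUEUED && iocb->private)
	my_destroy_ioend(iocb->private);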
@@ -1629,16 +1675,6 @@ xfs_vm_readpages(
 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
-STATIC void
-xfs_vm_invalidatepage(
-	struct page		*page,
-	unsigned long		offset)
-{
-	xfs_page_trace(XFS_INVALIDPAGE_ENTER,
-		       page->mapping->host, page, offset);
-	block_invalidatepage(page, offset);
-}
-
 const struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,