Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 851
1 file changed, 378 insertions(+), 473 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 0f8b9968a803..15412fe15c3a 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -21,19 +21,12 @@ | |||
21 | #include "xfs_inum.h" | 21 | #include "xfs_inum.h" |
22 | #include "xfs_sb.h" | 22 | #include "xfs_sb.h" |
23 | #include "xfs_ag.h" | 23 | #include "xfs_ag.h" |
24 | #include "xfs_dir2.h" | ||
25 | #include "xfs_trans.h" | 24 | #include "xfs_trans.h" |
26 | #include "xfs_dmapi.h" | ||
27 | #include "xfs_mount.h" | 25 | #include "xfs_mount.h" |
28 | #include "xfs_bmap_btree.h" | 26 | #include "xfs_bmap_btree.h" |
29 | #include "xfs_alloc_btree.h" | ||
30 | #include "xfs_ialloc_btree.h" | ||
31 | #include "xfs_dir2_sf.h" | ||
32 | #include "xfs_attr_sf.h" | ||
33 | #include "xfs_dinode.h" | 27 | #include "xfs_dinode.h" |
34 | #include "xfs_inode.h" | 28 | #include "xfs_inode.h" |
35 | #include "xfs_alloc.h" | 29 | #include "xfs_alloc.h" |
36 | #include "xfs_btree.h" | ||
37 | #include "xfs_error.h" | 30 | #include "xfs_error.h" |
38 | #include "xfs_rw.h" | 31 | #include "xfs_rw.h" |
39 | #include "xfs_iomap.h" | 32 | #include "xfs_iomap.h" |
@@ -45,6 +38,15 @@ | |||
45 | #include <linux/pagevec.h> | 38 | #include <linux/pagevec.h> |
46 | #include <linux/writeback.h> | 39 | #include <linux/writeback.h> |
47 | 40 | ||
41 | /* | ||
42 | * Types of I/O for bmap clustering and I/O completion tracking. | ||
43 | */ | ||
44 | enum { | ||
45 | IO_READ, /* mapping for a read */ | ||
46 | IO_DELAY, /* mapping covers delalloc region */ | ||
47 | IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ | ||
48 | IO_NEW /* just allocated */ | ||
49 | }; | ||
48 | 50 | ||
49 | /* | 51 | /* |
50 | * Prime number of hash buckets since address is used as the key. | 52 | * Prime number of hash buckets since address is used as the key. |
@@ -83,18 +85,15 @@ void | |||
83 | xfs_count_page_state( | 85 | xfs_count_page_state( |
84 | struct page *page, | 86 | struct page *page, |
85 | int *delalloc, | 87 | int *delalloc, |
86 | int *unmapped, | ||
87 | int *unwritten) | 88 | int *unwritten) |
88 | { | 89 | { |
89 | struct buffer_head *bh, *head; | 90 | struct buffer_head *bh, *head; |
90 | 91 | ||
91 | *delalloc = *unmapped = *unwritten = 0; | 92 | *delalloc = *unwritten = 0; |
92 | 93 | ||
93 | bh = head = page_buffers(page); | 94 | bh = head = page_buffers(page); |
94 | do { | 95 | do { |
95 | if (buffer_uptodate(bh) && !buffer_mapped(bh)) | 96 | if (buffer_unwritten(bh)) |
96 | (*unmapped) = 1; | ||
97 | else if (buffer_unwritten(bh)) | ||
98 | (*unwritten) = 1; | 97 | (*unwritten) = 1; |
99 | else if (buffer_delay(bh)) | 98 | else if (buffer_delay(bh)) |
100 | (*delalloc) = 1; | 99 | (*delalloc) = 1; |
@@ -103,8 +102,9 @@ xfs_count_page_state( | |||
103 | 102 | ||
104 | STATIC struct block_device * | 103 | STATIC struct block_device * |
105 | xfs_find_bdev_for_inode( | 104 | xfs_find_bdev_for_inode( |
106 | struct xfs_inode *ip) | 105 | struct inode *inode) |
107 | { | 106 | { |
107 | struct xfs_inode *ip = XFS_I(inode); | ||
108 | struct xfs_mount *mp = ip->i_mount; | 108 | struct xfs_mount *mp = ip->i_mount; |
109 | 109 | ||
110 | if (XFS_IS_REALTIME_INODE(ip)) | 110 | if (XFS_IS_REALTIME_INODE(ip)) |
@@ -183,7 +183,7 @@ xfs_setfilesize( | |||
183 | xfs_fsize_t isize; | 183 | xfs_fsize_t isize; |
184 | 184 | ||
185 | ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); | 185 | ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); |
186 | ASSERT(ioend->io_type != IOMAP_READ); | 186 | ASSERT(ioend->io_type != IO_READ); |
187 | 187 | ||
188 | if (unlikely(ioend->io_error)) | 188 | if (unlikely(ioend->io_error)) |
189 | return 0; | 189 | return 0; |
@@ -202,23 +202,17 @@ xfs_setfilesize( | |||
202 | } | 202 | } |
203 | 203 | ||
204 | /* | 204 | /* |
205 | * Schedule IO completion handling on a xfsdatad if this was | 205 | * Schedule IO completion handling on the final put of an ioend. |
206 | * the final hold on this ioend. If we are asked to wait, | ||
207 | * flush the workqueue. | ||
208 | */ | 206 | */ |
209 | STATIC void | 207 | STATIC void |
210 | xfs_finish_ioend( | 208 | xfs_finish_ioend( |
211 | xfs_ioend_t *ioend, | 209 | struct xfs_ioend *ioend) |
212 | int wait) | ||
213 | { | 210 | { |
214 | if (atomic_dec_and_test(&ioend->io_remaining)) { | 211 | if (atomic_dec_and_test(&ioend->io_remaining)) { |
215 | struct workqueue_struct *wq; | 212 | if (ioend->io_type == IO_UNWRITTEN) |
216 | 213 | queue_work(xfsconvertd_workqueue, &ioend->io_work); | |
217 | wq = (ioend->io_type == IOMAP_UNWRITTEN) ? | 214 | else |
218 | xfsconvertd_workqueue : xfsdatad_workqueue; | 215 | queue_work(xfsdatad_workqueue, &ioend->io_work); |
219 | queue_work(wq, &ioend->io_work); | ||
220 | if (wait) | ||
221 | flush_workqueue(wq); | ||
222 | } | 216 | } |
223 | } | 217 | } |
224 | 218 | ||
@@ -237,7 +231,7 @@ xfs_end_io( | |||
237 | * For unwritten extents we need to issue transactions to convert a | 231 | * For unwritten extents we need to issue transactions to convert a |
238 | * range to normal written extens after the data I/O has finished. | 232 | * range to normal written extens after the data I/O has finished. |
239 | */ | 233 | */ |
240 | if (ioend->io_type == IOMAP_UNWRITTEN && | 234 | if (ioend->io_type == IO_UNWRITTEN && |
241 | likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { | 235 | likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { |
242 | 236 | ||
243 | error = xfs_iomap_write_unwritten(ip, ioend->io_offset, | 237 | error = xfs_iomap_write_unwritten(ip, ioend->io_offset, |
@@ -250,7 +244,7 @@ xfs_end_io( | |||
250 | * We might have to update the on-disk file size after extending | 244 | * We might have to update the on-disk file size after extending |
251 | * writes. | 245 | * writes. |
252 | */ | 246 | */ |
253 | if (ioend->io_type != IOMAP_READ) { | 247 | if (ioend->io_type != IO_READ) { |
254 | error = xfs_setfilesize(ioend); | 248 | error = xfs_setfilesize(ioend); |
255 | ASSERT(!error || error == EAGAIN); | 249 | ASSERT(!error || error == EAGAIN); |
256 | } | 250 | } |
@@ -262,11 +256,25 @@ xfs_end_io( | |||
262 | */ | 256 | */ |
263 | if (error == EAGAIN) { | 257 | if (error == EAGAIN) { |
264 | atomic_inc(&ioend->io_remaining); | 258 | atomic_inc(&ioend->io_remaining); |
265 | xfs_finish_ioend(ioend, 0); | 259 | xfs_finish_ioend(ioend); |
266 | /* ensure we don't spin on blocked ioends */ | 260 | /* ensure we don't spin on blocked ioends */ |
267 | delay(1); | 261 | delay(1); |
268 | } else | 262 | } else { |
263 | if (ioend->io_iocb) | ||
264 | aio_complete(ioend->io_iocb, ioend->io_result, 0); | ||
269 | xfs_destroy_ioend(ioend); | 265 | xfs_destroy_ioend(ioend); |
266 | } | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * Call IO completion handling in caller context on the final put of an ioend. | ||
271 | */ | ||
272 | STATIC void | ||
273 | xfs_finish_ioend_sync( | ||
274 | struct xfs_ioend *ioend) | ||
275 | { | ||
276 | if (atomic_dec_and_test(&ioend->io_remaining)) | ||
277 | xfs_end_io(&ioend->io_work); | ||
270 | } | 278 | } |
271 | 279 | ||
272 | /* | 280 | /* |
@@ -299,6 +307,8 @@ xfs_alloc_ioend( | |||
299 | atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); | 307 | atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); |
300 | ioend->io_offset = 0; | 308 | ioend->io_offset = 0; |
301 | ioend->io_size = 0; | 309 | ioend->io_size = 0; |
310 | ioend->io_iocb = NULL; | ||
311 | ioend->io_result = 0; | ||
302 | 312 | ||
303 | INIT_WORK(&ioend->io_work, xfs_end_io); | 313 | INIT_WORK(&ioend->io_work, xfs_end_io); |
304 | return ioend; | 314 | return ioend; |
@@ -309,21 +319,25 @@ xfs_map_blocks( | |||
309 | struct inode *inode, | 319 | struct inode *inode, |
310 | loff_t offset, | 320 | loff_t offset, |
311 | ssize_t count, | 321 | ssize_t count, |
312 | xfs_iomap_t *mapp, | 322 | struct xfs_bmbt_irec *imap, |
313 | int flags) | 323 | int flags) |
314 | { | 324 | { |
315 | int nmaps = 1; | 325 | int nmaps = 1; |
326 | int new = 0; | ||
316 | 327 | ||
317 | return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps); | 328 | return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new); |
318 | } | 329 | } |
319 | 330 | ||
320 | STATIC int | 331 | STATIC int |
321 | xfs_iomap_valid( | 332 | xfs_imap_valid( |
322 | xfs_iomap_t *iomapp, | 333 | struct inode *inode, |
323 | loff_t offset) | 334 | struct xfs_bmbt_irec *imap, |
335 | xfs_off_t offset) | ||
324 | { | 336 | { |
325 | return offset >= iomapp->iomap_offset && | 337 | offset >>= inode->i_blkbits; |
326 | offset < iomapp->iomap_offset + iomapp->iomap_bsize; | 338 | |
339 | return offset >= imap->br_startoff && | ||
340 | offset < imap->br_startoff + imap->br_blockcount; | ||
327 | } | 341 | } |
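[Editor's note] The replacement helper works in filesystem-block units rather than bytes: the byte offset is shifted down by i_blkbits before being compared against the extent's br_startoff/br_blockcount. A minimal stand-alone sketch of the same check follows (simplified types, illustrative only, not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors only the two xfs_bmbt_irec fields the check uses. */
    struct irec_sketch {
            uint64_t br_startoff;   /* extent start, in fs blocks */
            uint64_t br_blockcount; /* extent length, in fs blocks */
    };

    /* Same test as the new xfs_imap_valid(): convert the byte offset to a
     * filesystem block number, then range-check it against the extent. */
    static bool imap_covers(const struct irec_sketch *imap,
                            uint64_t byte_offset, unsigned int blkbits)
    {
            uint64_t fsb = byte_offset >> blkbits;

            return fsb >= imap->br_startoff &&
                   fsb < imap->br_startoff + imap->br_blockcount;
    }

For example, with 4096-byte blocks (blkbits = 12) an extent at br_startoff = 8 with br_blockcount = 4 covers byte offsets 32768 through 49151.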
328 | 342 | ||
329 | /* | 343 | /* |
@@ -344,7 +358,7 @@ xfs_end_bio( | |||
344 | bio->bi_end_io = NULL; | 358 | bio->bi_end_io = NULL; |
345 | bio_put(bio); | 359 | bio_put(bio); |
346 | 360 | ||
347 | xfs_finish_ioend(ioend, 0); | 361 | xfs_finish_ioend(ioend); |
348 | } | 362 | } |
349 | 363 | ||
350 | STATIC void | 364 | STATIC void |
@@ -486,7 +500,7 @@ xfs_submit_ioend( | |||
486 | } | 500 | } |
487 | if (bio) | 501 | if (bio) |
488 | xfs_submit_ioend_bio(wbc, ioend, bio); | 502 | xfs_submit_ioend_bio(wbc, ioend, bio); |
489 | xfs_finish_ioend(ioend, 0); | 503 | xfs_finish_ioend(ioend); |
490 | } while ((ioend = next) != NULL); | 504 | } while ((ioend = next) != NULL); |
491 | } | 505 | } |
492 | 506 | ||
@@ -554,19 +568,23 @@ xfs_add_to_ioend( | |||
554 | 568 | ||
555 | STATIC void | 569 | STATIC void |
556 | xfs_map_buffer( | 570 | xfs_map_buffer( |
571 | struct inode *inode, | ||
557 | struct buffer_head *bh, | 572 | struct buffer_head *bh, |
558 | xfs_iomap_t *mp, | 573 | struct xfs_bmbt_irec *imap, |
559 | xfs_off_t offset, | 574 | xfs_off_t offset) |
560 | uint block_bits) | ||
561 | { | 575 | { |
562 | sector_t bn; | 576 | sector_t bn; |
577 | struct xfs_mount *m = XFS_I(inode)->i_mount; | ||
578 | xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff); | ||
579 | xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock); | ||
563 | 580 | ||
564 | ASSERT(mp->iomap_bn != IOMAP_DADDR_NULL); | 581 | ASSERT(imap->br_startblock != HOLESTARTBLOCK); |
582 | ASSERT(imap->br_startblock != DELAYSTARTBLOCK); | ||
565 | 583 | ||
566 | bn = (mp->iomap_bn >> (block_bits - BBSHIFT)) + | 584 | bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) + |
567 | ((offset - mp->iomap_offset) >> block_bits); | 585 | ((offset - iomap_offset) >> inode->i_blkbits); |
568 | 586 | ||
569 | ASSERT(bn || (mp->iomap_flags & IOMAP_REALTIME)); | 587 | ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode))); |
570 | 588 | ||
571 | bh->b_blocknr = bn; | 589 | bh->b_blocknr = bn; |
572 | set_buffer_mapped(bh); | 590 | set_buffer_mapped(bh); |
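[Editor's note] The b_blocknr arithmetic above now derives both operands from the xfs_bmbt_irec: the basic-block disk address (512-byte units, BBSHIFT == 9) is shifted down to filesystem blocks, and the buffer's byte distance from the start of the mapping is added. A stand-alone sketch with illustrative numbers (not the kernel code):

    #include <stdint.h>

    enum { SKETCH_BBSHIFT = 9 };    /* 512-byte "basic blocks" */

    /* Illustrative only: the block-number computation done by xfs_map_buffer(). */
    static uint64_t sketch_map_bn(uint64_t start_daddr,   /* mapping start, 512B units */
                                  uint64_t start_bytes,   /* mapping start, bytes */
                                  uint64_t offset_bytes,  /* this buffer, bytes */
                                  unsigned int blkbits)   /* fs block size shift */
    {
            /* disk address converted to fs blocks, plus blocks into the mapping */
            return (start_daddr >> (blkbits - SKETCH_BBSHIFT)) +
                   ((offset_bytes - start_bytes) >> blkbits);
    }

    /* e.g. 4k blocks (blkbits == 12), start_daddr == 2048, buffer 3 blocks in:
     * (2048 >> 3) + 3 == 259, which becomes bh->b_blocknr. */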
@@ -574,17 +592,17 @@ xfs_map_buffer( | |||
574 | 592 | ||
575 | STATIC void | 593 | STATIC void |
576 | xfs_map_at_offset( | 594 | xfs_map_at_offset( |
595 | struct inode *inode, | ||
577 | struct buffer_head *bh, | 596 | struct buffer_head *bh, |
578 | loff_t offset, | 597 | struct xfs_bmbt_irec *imap, |
579 | int block_bits, | 598 | xfs_off_t offset) |
580 | xfs_iomap_t *iomapp) | ||
581 | { | 599 | { |
582 | ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE)); | 600 | ASSERT(imap->br_startblock != HOLESTARTBLOCK); |
583 | ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY)); | 601 | ASSERT(imap->br_startblock != DELAYSTARTBLOCK); |
584 | 602 | ||
585 | lock_buffer(bh); | 603 | lock_buffer(bh); |
586 | xfs_map_buffer(bh, iomapp, offset, block_bits); | 604 | xfs_map_buffer(inode, bh, imap, offset); |
587 | bh->b_bdev = iomapp->iomap_target->bt_bdev; | 605 | bh->b_bdev = xfs_find_bdev_for_inode(inode); |
588 | set_buffer_mapped(bh); | 606 | set_buffer_mapped(bh); |
589 | clear_buffer_delay(bh); | 607 | clear_buffer_delay(bh); |
590 | clear_buffer_unwritten(bh); | 608 | clear_buffer_unwritten(bh); |
@@ -596,31 +614,30 @@ xfs_map_at_offset( | |||
596 | STATIC unsigned int | 614 | STATIC unsigned int |
597 | xfs_probe_page( | 615 | xfs_probe_page( |
598 | struct page *page, | 616 | struct page *page, |
599 | unsigned int pg_offset, | 617 | unsigned int pg_offset) |
600 | int mapped) | ||
601 | { | 618 | { |
619 | struct buffer_head *bh, *head; | ||
602 | int ret = 0; | 620 | int ret = 0; |
603 | 621 | ||
604 | if (PageWriteback(page)) | 622 | if (PageWriteback(page)) |
605 | return 0; | 623 | return 0; |
624 | if (!PageDirty(page)) | ||
625 | return 0; | ||
626 | if (!page->mapping) | ||
627 | return 0; | ||
628 | if (!page_has_buffers(page)) | ||
629 | return 0; | ||
606 | 630 | ||
607 | if (page->mapping && PageDirty(page)) { | 631 | bh = head = page_buffers(page); |
608 | if (page_has_buffers(page)) { | 632 | do { |
609 | struct buffer_head *bh, *head; | 633 | if (!buffer_uptodate(bh)) |
610 | 634 | break; | |
611 | bh = head = page_buffers(page); | 635 | if (!buffer_mapped(bh)) |
612 | do { | 636 | break; |
613 | if (!buffer_uptodate(bh)) | 637 | ret += bh->b_size; |
614 | break; | 638 | if (ret >= pg_offset) |
615 | if (mapped != buffer_mapped(bh)) | 639 | break; |
616 | break; | 640 | } while ((bh = bh->b_this_page) != head); |
617 | ret += bh->b_size; | ||
618 | if (ret >= pg_offset) | ||
619 | break; | ||
620 | } while ((bh = bh->b_this_page) != head); | ||
621 | } else | ||
622 | ret = mapped ? 0 : PAGE_CACHE_SIZE; | ||
623 | } | ||
624 | 641 | ||
625 | return ret; | 642 | return ret; |
626 | } | 643 | } |
@@ -630,8 +647,7 @@ xfs_probe_cluster( | |||
630 | struct inode *inode, | 647 | struct inode *inode, |
631 | struct page *startpage, | 648 | struct page *startpage, |
632 | struct buffer_head *bh, | 649 | struct buffer_head *bh, |
633 | struct buffer_head *head, | 650 | struct buffer_head *head) |
634 | int mapped) | ||
635 | { | 651 | { |
636 | struct pagevec pvec; | 652 | struct pagevec pvec; |
637 | pgoff_t tindex, tlast, tloff; | 653 | pgoff_t tindex, tlast, tloff; |
@@ -640,7 +656,7 @@ xfs_probe_cluster( | |||
640 | 656 | ||
641 | /* First sum forwards in this page */ | 657 | /* First sum forwards in this page */ |
642 | do { | 658 | do { |
643 | if (!buffer_uptodate(bh) || (mapped != buffer_mapped(bh))) | 659 | if (!buffer_uptodate(bh) || !buffer_mapped(bh)) |
644 | return total; | 660 | return total; |
645 | total += bh->b_size; | 661 | total += bh->b_size; |
646 | } while ((bh = bh->b_this_page) != head); | 662 | } while ((bh = bh->b_this_page) != head); |
@@ -674,7 +690,7 @@ xfs_probe_cluster( | |||
674 | pg_offset = PAGE_CACHE_SIZE; | 690 | pg_offset = PAGE_CACHE_SIZE; |
675 | 691 | ||
676 | if (page->index == tindex && trylock_page(page)) { | 692 | if (page->index == tindex && trylock_page(page)) { |
677 | pg_len = xfs_probe_page(page, pg_offset, mapped); | 693 | pg_len = xfs_probe_page(page, pg_offset); |
678 | unlock_page(page); | 694 | unlock_page(page); |
679 | } | 695 | } |
680 | 696 | ||
@@ -713,11 +729,11 @@ xfs_is_delayed_page( | |||
713 | bh = head = page_buffers(page); | 729 | bh = head = page_buffers(page); |
714 | do { | 730 | do { |
715 | if (buffer_unwritten(bh)) | 731 | if (buffer_unwritten(bh)) |
716 | acceptable = (type == IOMAP_UNWRITTEN); | 732 | acceptable = (type == IO_UNWRITTEN); |
717 | else if (buffer_delay(bh)) | 733 | else if (buffer_delay(bh)) |
718 | acceptable = (type == IOMAP_DELAY); | 734 | acceptable = (type == IO_DELAY); |
719 | else if (buffer_dirty(bh) && buffer_mapped(bh)) | 735 | else if (buffer_dirty(bh) && buffer_mapped(bh)) |
720 | acceptable = (type == IOMAP_NEW); | 736 | acceptable = (type == IO_NEW); |
721 | else | 737 | else |
722 | break; | 738 | break; |
723 | } while ((bh = bh->b_this_page) != head); | 739 | } while ((bh = bh->b_this_page) != head); |
@@ -740,17 +756,15 @@ xfs_convert_page( | |||
740 | struct inode *inode, | 756 | struct inode *inode, |
741 | struct page *page, | 757 | struct page *page, |
742 | loff_t tindex, | 758 | loff_t tindex, |
743 | xfs_iomap_t *mp, | 759 | struct xfs_bmbt_irec *imap, |
744 | xfs_ioend_t **ioendp, | 760 | xfs_ioend_t **ioendp, |
745 | struct writeback_control *wbc, | 761 | struct writeback_control *wbc, |
746 | int startio, | ||
747 | int all_bh) | 762 | int all_bh) |
748 | { | 763 | { |
749 | struct buffer_head *bh, *head; | 764 | struct buffer_head *bh, *head; |
750 | xfs_off_t end_offset; | 765 | xfs_off_t end_offset; |
751 | unsigned long p_offset; | 766 | unsigned long p_offset; |
752 | unsigned int type; | 767 | unsigned int type; |
753 | int bbits = inode->i_blkbits; | ||
754 | int len, page_dirty; | 768 | int len, page_dirty; |
755 | int count = 0, done = 0, uptodate = 1; | 769 | int count = 0, done = 0, uptodate = 1; |
756 | xfs_off_t offset = page_offset(page); | 770 | xfs_off_t offset = page_offset(page); |
@@ -802,32 +816,27 @@ xfs_convert_page( | |||
802 | 816 | ||
803 | if (buffer_unwritten(bh) || buffer_delay(bh)) { | 817 | if (buffer_unwritten(bh) || buffer_delay(bh)) { |
804 | if (buffer_unwritten(bh)) | 818 | if (buffer_unwritten(bh)) |
805 | type = IOMAP_UNWRITTEN; | 819 | type = IO_UNWRITTEN; |
806 | else | 820 | else |
807 | type = IOMAP_DELAY; | 821 | type = IO_DELAY; |
808 | 822 | ||
809 | if (!xfs_iomap_valid(mp, offset)) { | 823 | if (!xfs_imap_valid(inode, imap, offset)) { |
810 | done = 1; | 824 | done = 1; |
811 | continue; | 825 | continue; |
812 | } | 826 | } |
813 | 827 | ||
814 | ASSERT(!(mp->iomap_flags & IOMAP_HOLE)); | 828 | ASSERT(imap->br_startblock != HOLESTARTBLOCK); |
815 | ASSERT(!(mp->iomap_flags & IOMAP_DELAY)); | 829 | ASSERT(imap->br_startblock != DELAYSTARTBLOCK); |
830 | |||
831 | xfs_map_at_offset(inode, bh, imap, offset); | ||
832 | xfs_add_to_ioend(inode, bh, offset, type, | ||
833 | ioendp, done); | ||
816 | 834 | ||
817 | xfs_map_at_offset(bh, offset, bbits, mp); | ||
818 | if (startio) { | ||
819 | xfs_add_to_ioend(inode, bh, offset, | ||
820 | type, ioendp, done); | ||
821 | } else { | ||
822 | set_buffer_dirty(bh); | ||
823 | unlock_buffer(bh); | ||
824 | mark_buffer_dirty(bh); | ||
825 | } | ||
826 | page_dirty--; | 835 | page_dirty--; |
827 | count++; | 836 | count++; |
828 | } else { | 837 | } else { |
829 | type = IOMAP_NEW; | 838 | type = IO_NEW; |
830 | if (buffer_mapped(bh) && all_bh && startio) { | 839 | if (buffer_mapped(bh) && all_bh) { |
831 | lock_buffer(bh); | 840 | lock_buffer(bh); |
832 | xfs_add_to_ioend(inode, bh, offset, | 841 | xfs_add_to_ioend(inode, bh, offset, |
833 | type, ioendp, done); | 842 | type, ioendp, done); |
@@ -842,14 +851,12 @@ xfs_convert_page( | |||
842 | if (uptodate && bh == head) | 851 | if (uptodate && bh == head) |
843 | SetPageUptodate(page); | 852 | SetPageUptodate(page); |
844 | 853 | ||
845 | if (startio) { | 854 | if (count) { |
846 | if (count) { | 855 | wbc->nr_to_write--; |
847 | wbc->nr_to_write--; | 856 | if (wbc->nr_to_write <= 0) |
848 | if (wbc->nr_to_write <= 0) | 857 | done = 1; |
849 | done = 1; | ||
850 | } | ||
851 | xfs_start_page_writeback(page, !page_dirty, count); | ||
852 | } | 858 | } |
859 | xfs_start_page_writeback(page, !page_dirty, count); | ||
853 | 860 | ||
854 | return done; | 861 | return done; |
855 | fail_unlock_page: | 862 | fail_unlock_page: |
@@ -866,10 +873,9 @@ STATIC void | |||
866 | xfs_cluster_write( | 873 | xfs_cluster_write( |
867 | struct inode *inode, | 874 | struct inode *inode, |
868 | pgoff_t tindex, | 875 | pgoff_t tindex, |
869 | xfs_iomap_t *iomapp, | 876 | struct xfs_bmbt_irec *imap, |
870 | xfs_ioend_t **ioendp, | 877 | xfs_ioend_t **ioendp, |
871 | struct writeback_control *wbc, | 878 | struct writeback_control *wbc, |
872 | int startio, | ||
873 | int all_bh, | 879 | int all_bh, |
874 | pgoff_t tlast) | 880 | pgoff_t tlast) |
875 | { | 881 | { |
@@ -885,7 +891,7 @@ xfs_cluster_write( | |||
885 | 891 | ||
886 | for (i = 0; i < pagevec_count(&pvec); i++) { | 892 | for (i = 0; i < pagevec_count(&pvec); i++) { |
887 | done = xfs_convert_page(inode, pvec.pages[i], tindex++, | 893 | done = xfs_convert_page(inode, pvec.pages[i], tindex++, |
888 | iomapp, ioendp, wbc, startio, all_bh); | 894 | imap, ioendp, wbc, all_bh); |
889 | if (done) | 895 | if (done) |
890 | break; | 896 | break; |
891 | } | 897 | } |
@@ -930,7 +936,7 @@ xfs_aops_discard_page( | |||
930 | loff_t offset = page_offset(page); | 936 | loff_t offset = page_offset(page); |
931 | ssize_t len = 1 << inode->i_blkbits; | 937 | ssize_t len = 1 << inode->i_blkbits; |
932 | 938 | ||
933 | if (!xfs_is_delayed_page(page, IOMAP_DELAY)) | 939 | if (!xfs_is_delayed_page(page, IO_DELAY)) |
934 | goto out_invalidate; | 940 | goto out_invalidate; |
935 | 941 | ||
936 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | 942 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
@@ -964,7 +970,7 @@ xfs_aops_discard_page( | |||
964 | */ | 970 | */ |
965 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, | 971 | error = xfs_bmapi(NULL, ip, offset_fsb, 1, |
966 | XFS_BMAPI_ENTIRE, NULL, 0, &imap, | 972 | XFS_BMAPI_ENTIRE, NULL, 0, &imap, |
967 | &nimaps, NULL, NULL); | 973 | &nimaps, NULL); |
968 | 974 | ||
969 | if (error) { | 975 | if (error) { |
970 | /* something screwed, just bail */ | 976 | /* something screwed, just bail */ |
@@ -992,7 +998,7 @@ xfs_aops_discard_page( | |||
992 | */ | 998 | */ |
993 | xfs_bmap_init(&flist, &firstblock); | 999 | xfs_bmap_init(&flist, &firstblock); |
994 | error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, | 1000 | error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, |
995 | &flist, NULL, &done); | 1001 | &flist, &done); |
996 | 1002 | ||
997 | ASSERT(!flist.xbf_count && !flist.xbf_first); | 1003 | ASSERT(!flist.xbf_count && !flist.xbf_first); |
998 | if (error) { | 1004 | if (error) { |
@@ -1015,50 +1021,66 @@ out_invalidate: | |||
1015 | } | 1021 | } |
1016 | 1022 | ||
1017 | /* | 1023 | /* |
1018 | * Calling this without startio set means we are being asked to make a dirty | 1024 | * Write out a dirty page. |
1019 | * page ready for freeing it's buffers. When called with startio set then | ||
1020 | * we are coming from writepage. | ||
1021 | * | 1025 | * |
1022 | * When called with startio set it is important that we write the WHOLE | 1026 | * For delalloc space on the page we need to allocate space and flush it. |
1023 | * page if possible. | 1027 | * For unwritten space on the page we need to start the conversion to |
1024 | * The bh->b_state's cannot know if any of the blocks or which block for | 1028 | * regular allocated space. |
1025 | * that matter are dirty due to mmap writes, and therefore bh uptodate is | 1029 | * For any other dirty buffer heads on the page we should flush them. |
1026 | * only valid if the page itself isn't completely uptodate. Some layers | 1030 | * |
1027 | * may clear the page dirty flag prior to calling write page, under the | 1031 | * If we detect that a transaction would be required to flush the page, we |
1028 | * assumption the entire page will be written out; by not writing out the | 1032 | * have to check the process flags first, if we are already in a transaction |
1029 | * whole page the page can be reused before all valid dirty data is | 1033 | * or disk I/O during allocations is off, we need to fail the writepage and |
1030 | * written out. Note: in the case of a page that has been dirty'd by | 1034 | * redirty the page. |
1031 | * mapwrite and but partially setup by block_prepare_write the | ||
1032 | * bh->b_states's will not agree and only ones setup by BPW/BCW will have | ||
1033 | * valid state, thus the whole page must be written out thing. | ||
1034 | */ | 1035 | */ |
1035 | |||
1036 | STATIC int | 1036 | STATIC int |
1037 | xfs_page_state_convert( | 1037 | xfs_vm_writepage( |
1038 | struct inode *inode, | 1038 | struct page *page, |
1039 | struct page *page, | 1039 | struct writeback_control *wbc) |
1040 | struct writeback_control *wbc, | ||
1041 | int startio, | ||
1042 | int unmapped) /* also implies page uptodate */ | ||
1043 | { | 1040 | { |
1041 | struct inode *inode = page->mapping->host; | ||
1042 | int delalloc, unwritten; | ||
1044 | struct buffer_head *bh, *head; | 1043 | struct buffer_head *bh, *head; |
1045 | xfs_iomap_t iomap; | 1044 | struct xfs_bmbt_irec imap; |
1046 | xfs_ioend_t *ioend = NULL, *iohead = NULL; | 1045 | xfs_ioend_t *ioend = NULL, *iohead = NULL; |
1047 | loff_t offset; | 1046 | loff_t offset; |
1048 | unsigned long p_offset = 0; | ||
1049 | unsigned int type; | 1047 | unsigned int type; |
1050 | __uint64_t end_offset; | 1048 | __uint64_t end_offset; |
1051 | pgoff_t end_index, last_index, tlast; | 1049 | pgoff_t end_index, last_index; |
1052 | ssize_t size, len; | 1050 | ssize_t size, len; |
1053 | int flags, err, iomap_valid = 0, uptodate = 1; | 1051 | int flags, err, imap_valid = 0, uptodate = 1; |
1054 | int page_dirty, count = 0; | 1052 | int count = 0; |
1055 | int trylock = 0; | 1053 | int all_bh = 0; |
1056 | int all_bh = unmapped; | 1054 | |
1057 | 1055 | trace_xfs_writepage(inode, page, 0); | |
1058 | if (startio) { | 1056 | |
1059 | if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) | 1057 | ASSERT(page_has_buffers(page)); |
1060 | trylock |= BMAPI_TRYLOCK; | 1058 | |
1061 | } | 1059 | /* |
1060 | * Refuse to write the page out if we are called from reclaim context. | ||
1061 | * | ||
1062 | * This avoids stack overflows when called from deeply used stacks in | ||
1063 | * random callers for direct reclaim or memcg reclaim. We explicitly | ||
1064 | * allow reclaim from kswapd as the stack usage there is relatively low. | ||
1065 | * | ||
1066 | * This should really be done by the core VM, but until that happens | ||
1067 | * filesystems like XFS, btrfs and ext4 have to take care of this | ||
1068 | * by themselves. | ||
1069 | */ | ||
1070 | if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) | ||
1071 | goto out_fail; | ||
1072 | |||
1073 | /* | ||
1074 | * We need a transaction if there are delalloc or unwritten buffers | ||
1075 | * on the page. | ||
1076 | * | ||
1077 | * If we need a transaction and the process flags say we are already | ||
1078 | * in a transaction, or no IO is allowed then mark the page dirty | ||
1079 | * again and leave the page as is. | ||
1080 | */ | ||
1081 | xfs_count_page_state(page, &delalloc, &unwritten); | ||
1082 | if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) | ||
1083 | goto out_fail; | ||
1062 | 1084 | ||
1063 | /* Is this page beyond the end of the file? */ | 1085 | /* Is this page beyond the end of the file? */ |
1064 | offset = i_size_read(inode); | 1086 | offset = i_size_read(inode); |
@@ -1067,92 +1089,64 @@ xfs_page_state_convert( | |||
1067 | if (page->index >= end_index) { | 1089 | if (page->index >= end_index) { |
1068 | if ((page->index >= end_index + 1) || | 1090 | if ((page->index >= end_index + 1) || |
1069 | !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { | 1091 | !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { |
1070 | if (startio) | 1092 | unlock_page(page); |
1071 | unlock_page(page); | ||
1072 | return 0; | 1093 | return 0; |
1073 | } | 1094 | } |
1074 | } | 1095 | } |
1075 | 1096 | ||
1076 | /* | ||
1077 | * page_dirty is initially a count of buffers on the page before | ||
1078 | * EOF and is decremented as we move each into a cleanable state. | ||
1079 | * | ||
1080 | * Derivation: | ||
1081 | * | ||
1082 | * End offset is the highest offset that this page should represent. | ||
1083 | * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) | ||
1084 | * will evaluate non-zero and be less than PAGE_CACHE_SIZE and | ||
1085 | * hence give us the correct page_dirty count. On any other page, | ||
1086 | * it will be zero and in that case we need page_dirty to be the | ||
1087 | * count of buffers on the page. | ||
1088 | */ | ||
1089 | end_offset = min_t(unsigned long long, | 1097 | end_offset = min_t(unsigned long long, |
1090 | (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset); | 1098 | (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, |
1099 | offset); | ||
1091 | len = 1 << inode->i_blkbits; | 1100 | len = 1 << inode->i_blkbits; |
1092 | p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), | ||
1093 | PAGE_CACHE_SIZE); | ||
1094 | p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; | ||
1095 | page_dirty = p_offset / len; | ||
1096 | 1101 | ||
1097 | bh = head = page_buffers(page); | 1102 | bh = head = page_buffers(page); |
1098 | offset = page_offset(page); | 1103 | offset = page_offset(page); |
1099 | flags = BMAPI_READ; | 1104 | flags = BMAPI_READ; |
1100 | type = IOMAP_NEW; | 1105 | type = IO_NEW; |
1101 | |||
1102 | /* TODO: cleanup count and page_dirty */ | ||
1103 | 1106 | ||
1104 | do { | 1107 | do { |
1105 | if (offset >= end_offset) | 1108 | if (offset >= end_offset) |
1106 | break; | 1109 | break; |
1107 | if (!buffer_uptodate(bh)) | 1110 | if (!buffer_uptodate(bh)) |
1108 | uptodate = 0; | 1111 | uptodate = 0; |
1109 | if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) { | 1112 | |
1110 | /* | 1113 | /* |
1111 | * the iomap is actually still valid, but the ioend | 1114 | * A hole may still be marked uptodate because discard_buffer |
1112 | * isn't. shouldn't happen too often. | 1115 | * leaves the flag set. |
1113 | */ | 1116 | */ |
1114 | iomap_valid = 0; | 1117 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { |
1118 | ASSERT(!buffer_dirty(bh)); | ||
1119 | imap_valid = 0; | ||
1115 | continue; | 1120 | continue; |
1116 | } | 1121 | } |
1117 | 1122 | ||
1118 | if (iomap_valid) | 1123 | if (imap_valid) |
1119 | iomap_valid = xfs_iomap_valid(&iomap, offset); | 1124 | imap_valid = xfs_imap_valid(inode, &imap, offset); |
1120 | 1125 | ||
1121 | /* | 1126 | if (buffer_unwritten(bh) || buffer_delay(bh)) { |
1122 | * First case, map an unwritten extent and prepare for | ||
1123 | * extent state conversion transaction on completion. | ||
1124 | * | ||
1125 | * Second case, allocate space for a delalloc buffer. | ||
1126 | * We can return EAGAIN here in the release page case. | ||
1127 | * | ||
1128 | * Third case, an unmapped buffer was found, and we are | ||
1129 | * in a path where we need to write the whole page out. | ||
1130 | */ | ||
1131 | if (buffer_unwritten(bh) || buffer_delay(bh) || | ||
1132 | ((buffer_uptodate(bh) || PageUptodate(page)) && | ||
1133 | !buffer_mapped(bh) && (unmapped || startio))) { | ||
1134 | int new_ioend = 0; | 1127 | int new_ioend = 0; |
1135 | 1128 | ||
1136 | /* | 1129 | /* |
1137 | * Make sure we don't use a read-only iomap | 1130 | * Make sure we don't use a read-only iomap |
1138 | */ | 1131 | */ |
1139 | if (flags == BMAPI_READ) | 1132 | if (flags == BMAPI_READ) |
1140 | iomap_valid = 0; | 1133 | imap_valid = 0; |
1141 | 1134 | ||
1142 | if (buffer_unwritten(bh)) { | 1135 | if (buffer_unwritten(bh)) { |
1143 | type = IOMAP_UNWRITTEN; | 1136 | type = IO_UNWRITTEN; |
1144 | flags = BMAPI_WRITE | BMAPI_IGNSTATE; | 1137 | flags = BMAPI_WRITE | BMAPI_IGNSTATE; |
1145 | } else if (buffer_delay(bh)) { | 1138 | } else if (buffer_delay(bh)) { |
1146 | type = IOMAP_DELAY; | 1139 | type = IO_DELAY; |
1147 | flags = BMAPI_ALLOCATE | trylock; | 1140 | flags = BMAPI_ALLOCATE; |
1148 | } else { | 1141 | |
1149 | type = IOMAP_NEW; | 1142 | if (wbc->sync_mode == WB_SYNC_NONE && |
1150 | flags = BMAPI_WRITE | BMAPI_MMAP; | 1143 | wbc->nonblocking) |
1144 | flags |= BMAPI_TRYLOCK; | ||
1151 | } | 1145 | } |
1152 | 1146 | ||
1153 | if (!iomap_valid) { | 1147 | if (!imap_valid) { |
1154 | /* | 1148 | /* |
1155 | * if we didn't have a valid mapping then we | 1149 | * If we didn't have a valid mapping then we |
1156 | * need to ensure that we put the new mapping | 1150 | * need to ensure that we put the new mapping |
1157 | * in a new ioend structure. This needs to be | 1151 | * in a new ioend structure. This needs to be |
1158 | * done to ensure that the ioends correctly | 1152 | * done to ensure that the ioends correctly |
@@ -1160,74 +1154,57 @@ xfs_page_state_convert( | |||
1160 | * for unwritten extent conversion. | 1154 | * for unwritten extent conversion. |
1161 | */ | 1155 | */ |
1162 | new_ioend = 1; | 1156 | new_ioend = 1; |
1163 | if (type == IOMAP_NEW) { | 1157 | err = xfs_map_blocks(inode, offset, len, |
1164 | size = xfs_probe_cluster(inode, | 1158 | &imap, flags); |
1165 | page, bh, head, 0); | ||
1166 | } else { | ||
1167 | size = len; | ||
1168 | } | ||
1169 | |||
1170 | err = xfs_map_blocks(inode, offset, size, | ||
1171 | &iomap, flags); | ||
1172 | if (err) | 1159 | if (err) |
1173 | goto error; | 1160 | goto error; |
1174 | iomap_valid = xfs_iomap_valid(&iomap, offset); | 1161 | imap_valid = xfs_imap_valid(inode, &imap, |
1162 | offset); | ||
1175 | } | 1163 | } |
1176 | if (iomap_valid) { | 1164 | if (imap_valid) { |
1177 | xfs_map_at_offset(bh, offset, | 1165 | xfs_map_at_offset(inode, bh, &imap, offset); |
1178 | inode->i_blkbits, &iomap); | 1166 | xfs_add_to_ioend(inode, bh, offset, type, |
1179 | if (startio) { | 1167 | &ioend, new_ioend); |
1180 | xfs_add_to_ioend(inode, bh, offset, | ||
1181 | type, &ioend, | ||
1182 | new_ioend); | ||
1183 | } else { | ||
1184 | set_buffer_dirty(bh); | ||
1185 | unlock_buffer(bh); | ||
1186 | mark_buffer_dirty(bh); | ||
1187 | } | ||
1188 | page_dirty--; | ||
1189 | count++; | 1168 | count++; |
1190 | } | 1169 | } |
1191 | } else if (buffer_uptodate(bh) && startio) { | 1170 | } else if (buffer_uptodate(bh)) { |
1192 | /* | 1171 | /* |
1193 | * we got here because the buffer is already mapped. | 1172 | * we got here because the buffer is already mapped. |
1194 | * That means it must already have extents allocated | 1173 | * That means it must already have extents allocated |
1195 | * underneath it. Map the extent by reading it. | 1174 | * underneath it. Map the extent by reading it. |
1196 | */ | 1175 | */ |
1197 | if (!iomap_valid || flags != BMAPI_READ) { | 1176 | if (!imap_valid || flags != BMAPI_READ) { |
1198 | flags = BMAPI_READ; | 1177 | flags = BMAPI_READ; |
1199 | size = xfs_probe_cluster(inode, page, bh, | 1178 | size = xfs_probe_cluster(inode, page, bh, head); |
1200 | head, 1); | ||
1201 | err = xfs_map_blocks(inode, offset, size, | 1179 | err = xfs_map_blocks(inode, offset, size, |
1202 | &iomap, flags); | 1180 | &imap, flags); |
1203 | if (err) | 1181 | if (err) |
1204 | goto error; | 1182 | goto error; |
1205 | iomap_valid = xfs_iomap_valid(&iomap, offset); | 1183 | imap_valid = xfs_imap_valid(inode, &imap, |
1184 | offset); | ||
1206 | } | 1185 | } |
1207 | 1186 | ||
1208 | /* | 1187 | /* |
1209 | * We set the type to IOMAP_NEW in case we are doing a | 1188 | * We set the type to IO_NEW in case we are doing a |
1210 | * small write at EOF that is extending the file but | 1189 | * small write at EOF that is extending the file but |
1211 | * without needing an allocation. We need to update the | 1190 | * without needing an allocation. We need to update the |
1212 | * file size on I/O completion in this case so it is | 1191 | * file size on I/O completion in this case so it is |
1213 | * the same case as having just allocated a new extent | 1192 | * the same case as having just allocated a new extent |
1214 | * that we are writing into for the first time. | 1193 | * that we are writing into for the first time. |
1215 | */ | 1194 | */ |
1216 | type = IOMAP_NEW; | 1195 | type = IO_NEW; |
1217 | if (trylock_buffer(bh)) { | 1196 | if (trylock_buffer(bh)) { |
1218 | ASSERT(buffer_mapped(bh)); | 1197 | if (imap_valid) |
1219 | if (iomap_valid) | ||
1220 | all_bh = 1; | 1198 | all_bh = 1; |
1221 | xfs_add_to_ioend(inode, bh, offset, type, | 1199 | xfs_add_to_ioend(inode, bh, offset, type, |
1222 | &ioend, !iomap_valid); | 1200 | &ioend, !imap_valid); |
1223 | page_dirty--; | ||
1224 | count++; | 1201 | count++; |
1225 | } else { | 1202 | } else { |
1226 | iomap_valid = 0; | 1203 | imap_valid = 0; |
1227 | } | 1204 | } |
1228 | } else if ((buffer_uptodate(bh) || PageUptodate(page)) && | 1205 | } else if (PageUptodate(page)) { |
1229 | (unmapped || startio)) { | 1206 | ASSERT(buffer_mapped(bh)); |
1230 | iomap_valid = 0; | 1207 | imap_valid = 0; |
1231 | } | 1208 | } |
1232 | 1209 | ||
1233 | if (!iohead) | 1210 | if (!iohead) |
@@ -1238,132 +1215,45 @@ xfs_page_state_convert( | |||
1238 | if (uptodate && bh == head) | 1215 | if (uptodate && bh == head) |
1239 | SetPageUptodate(page); | 1216 | SetPageUptodate(page); |
1240 | 1217 | ||
1241 | if (startio) | 1218 | xfs_start_page_writeback(page, 1, count); |
1242 | xfs_start_page_writeback(page, 1, count); | 1219 | |
1220 | if (ioend && imap_valid) { | ||
1221 | xfs_off_t end_index; | ||
1222 | |||
1223 | end_index = imap.br_startoff + imap.br_blockcount; | ||
1224 | |||
1225 | /* to bytes */ | ||
1226 | end_index <<= inode->i_blkbits; | ||
1243 | 1227 | ||
1244 | if (ioend && iomap_valid) { | 1228 | /* to pages */ |
1245 | offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >> | 1229 | end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; |
1246 | PAGE_CACHE_SHIFT; | 1230 | |
1247 | tlast = min_t(pgoff_t, offset, last_index); | 1231 | /* check against file size */ |
1248 | xfs_cluster_write(inode, page->index + 1, &iomap, &ioend, | 1232 | if (end_index > last_index) |
1249 | wbc, startio, all_bh, tlast); | 1233 | end_index = last_index; |
1234 | |||
1235 | xfs_cluster_write(inode, page->index + 1, &imap, &ioend, | ||
1236 | wbc, all_bh, end_index); | ||
1250 | } | 1237 | } |
1251 | 1238 | ||
1252 | if (iohead) | 1239 | if (iohead) |
1253 | xfs_submit_ioend(wbc, iohead); | 1240 | xfs_submit_ioend(wbc, iohead); |
1254 | 1241 | ||
1255 | return page_dirty; | 1242 | return 0; |
1256 | 1243 | ||
1257 | error: | 1244 | error: |
1258 | if (iohead) | 1245 | if (iohead) |
1259 | xfs_cancel_ioend(iohead); | 1246 | xfs_cancel_ioend(iohead); |
1260 | 1247 | ||
1261 | /* | 1248 | xfs_aops_discard_page(page); |
1262 | * If it's delalloc and we have nowhere to put it, | 1249 | ClearPageUptodate(page); |
1263 | * throw it away, unless the lower layers told | 1250 | unlock_page(page); |
1264 | * us to try again. | ||
1265 | */ | ||
1266 | if (err != -EAGAIN) { | ||
1267 | if (!unmapped) | ||
1268 | xfs_aops_discard_page(page); | ||
1269 | ClearPageUptodate(page); | ||
1270 | } | ||
1271 | return err; | 1251 | return err; |
1272 | } | ||
1273 | |||
1274 | /* | ||
1275 | * writepage: Called from one of two places: | ||
1276 | * | ||
1277 | * 1. we are flushing a delalloc buffer head. | ||
1278 | * | ||
1279 | * 2. we are writing out a dirty page. Typically the page dirty | ||
1280 | * state is cleared before we get here. In this case is it | ||
1281 | * conceivable we have no buffer heads. | ||
1282 | * | ||
1283 | * For delalloc space on the page we need to allocate space and | ||
1284 | * flush it. For unmapped buffer heads on the page we should | ||
1285 | * allocate space if the page is uptodate. For any other dirty | ||
1286 | * buffer heads on the page we should flush them. | ||
1287 | * | ||
1288 | * If we detect that a transaction would be required to flush | ||
1289 | * the page, we have to check the process flags first, if we | ||
1290 | * are already in a transaction or disk I/O during allocations | ||
1291 | * is off, we need to fail the writepage and redirty the page. | ||
1292 | */ | ||
1293 | |||
1294 | STATIC int | ||
1295 | xfs_vm_writepage( | ||
1296 | struct page *page, | ||
1297 | struct writeback_control *wbc) | ||
1298 | { | ||
1299 | int error; | ||
1300 | int need_trans; | ||
1301 | int delalloc, unmapped, unwritten; | ||
1302 | struct inode *inode = page->mapping->host; | ||
1303 | |||
1304 | trace_xfs_writepage(inode, page, 0); | ||
1305 | |||
1306 | /* | ||
1307 | * We need a transaction if: | ||
1308 | * 1. There are delalloc buffers on the page | ||
1309 | * 2. The page is uptodate and we have unmapped buffers | ||
1310 | * 3. The page is uptodate and we have no buffers | ||
1311 | * 4. There are unwritten buffers on the page | ||
1312 | */ | ||
1313 | |||
1314 | if (!page_has_buffers(page)) { | ||
1315 | unmapped = 1; | ||
1316 | need_trans = 1; | ||
1317 | } else { | ||
1318 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); | ||
1319 | if (!PageUptodate(page)) | ||
1320 | unmapped = 0; | ||
1321 | need_trans = delalloc + unmapped + unwritten; | ||
1322 | } | ||
1323 | |||
1324 | /* | ||
1325 | * If we need a transaction and the process flags say | ||
1326 | * we are already in a transaction, or no IO is allowed | ||
1327 | * then mark the page dirty again and leave the page | ||
1328 | * as is. | ||
1329 | */ | ||
1330 | if (current_test_flags(PF_FSTRANS) && need_trans) | ||
1331 | goto out_fail; | ||
1332 | |||
1333 | /* | ||
1334 | * Delay hooking up buffer heads until we have | ||
1335 | * made our go/no-go decision. | ||
1336 | */ | ||
1337 | if (!page_has_buffers(page)) | ||
1338 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); | ||
1339 | |||
1340 | |||
1341 | /* | ||
1342 | * VM calculation for nr_to_write seems off. Bump it way | ||
1343 | * up, this gets simple streaming writes zippy again. | ||
1344 | * To be reviewed again after Jens' writeback changes. | ||
1345 | */ | ||
1346 | wbc->nr_to_write *= 4; | ||
1347 | |||
1348 | /* | ||
1349 | * Convert delayed allocate, unwritten or unmapped space | ||
1350 | * to real space and flush out to disk. | ||
1351 | */ | ||
1352 | error = xfs_page_state_convert(inode, page, wbc, 1, unmapped); | ||
1353 | if (error == -EAGAIN) | ||
1354 | goto out_fail; | ||
1355 | if (unlikely(error < 0)) | ||
1356 | goto out_unlock; | ||
1357 | |||
1358 | return 0; | ||
1359 | 1252 | ||
1360 | out_fail: | 1253 | out_fail: |
1361 | redirty_page_for_writepage(wbc, page); | 1254 | redirty_page_for_writepage(wbc, page); |
1362 | unlock_page(page); | 1255 | unlock_page(page); |
1363 | return 0; | 1256 | return 0; |
1364 | out_unlock: | ||
1365 | unlock_page(page); | ||
1366 | return error; | ||
1367 | } | 1257 | } |
1368 | 1258 | ||
1369 | STATIC int | 1259 | STATIC int |
@@ -1377,65 +1267,27 @@ xfs_vm_writepages( | |||
1377 | 1267 | ||
1378 | /* | 1268 | /* |
1379 | * Called to move a page into cleanable state - and from there | 1269 | * Called to move a page into cleanable state - and from there |
1380 | * to be released. Possibly the page is already clean. We always | 1270 | * to be released. The page should already be clean. We always |
1381 | * have buffer heads in this call. | 1271 | * have buffer heads in this call. |
1382 | * | 1272 | * |
1383 | * Returns 0 if the page is ok to release, 1 otherwise. | 1273 | * Returns 1 if the page is ok to release, 0 otherwise. |
1384 | * | ||
1385 | * Possible scenarios are: | ||
1386 | * | ||
1387 | * 1. We are being called to release a page which has been written | ||
1388 | * to via regular I/O. buffer heads will be dirty and possibly | ||
1389 | * delalloc. If no delalloc buffer heads in this case then we | ||
1390 | * can just return zero. | ||
1391 | * | ||
1392 | * 2. We are called to release a page which has been written via | ||
1393 | * mmap, all we need to do is ensure there is no delalloc | ||
1394 | * state in the buffer heads, if not we can let the caller | ||
1395 | * free them and we should come back later via writepage. | ||
1396 | */ | 1274 | */ |
1397 | STATIC int | 1275 | STATIC int |
1398 | xfs_vm_releasepage( | 1276 | xfs_vm_releasepage( |
1399 | struct page *page, | 1277 | struct page *page, |
1400 | gfp_t gfp_mask) | 1278 | gfp_t gfp_mask) |
1401 | { | 1279 | { |
1402 | struct inode *inode = page->mapping->host; | 1280 | int delalloc, unwritten; |
1403 | int dirty, delalloc, unmapped, unwritten; | ||
1404 | struct writeback_control wbc = { | ||
1405 | .sync_mode = WB_SYNC_ALL, | ||
1406 | .nr_to_write = 1, | ||
1407 | }; | ||
1408 | |||
1409 | trace_xfs_releasepage(inode, page, 0); | ||
1410 | 1281 | ||
1411 | if (!page_has_buffers(page)) | 1282 | trace_xfs_releasepage(page->mapping->host, page, 0); |
1412 | return 0; | ||
1413 | 1283 | ||
1414 | xfs_count_page_state(page, &delalloc, &unmapped, &unwritten); | 1284 | xfs_count_page_state(page, &delalloc, &unwritten); |
1415 | if (!delalloc && !unwritten) | ||
1416 | goto free_buffers; | ||
1417 | 1285 | ||
1418 | if (!(gfp_mask & __GFP_FS)) | 1286 | if (WARN_ON(delalloc)) |
1419 | return 0; | 1287 | return 0; |
1420 | 1288 | if (WARN_ON(unwritten)) | |
1421 | /* If we are already inside a transaction or the thread cannot | ||
1422 | * do I/O, we cannot release this page. | ||
1423 | */ | ||
1424 | if (current_test_flags(PF_FSTRANS)) | ||
1425 | return 0; | 1289 | return 0; |
1426 | 1290 | ||
1427 | /* | ||
1428 | * Convert delalloc space to real space, do not flush the | ||
1429 | * data out to disk, that will be done by the caller. | ||
1430 | * Never need to allocate space here - we will always | ||
1431 | * come back to writepage in that case. | ||
1432 | */ | ||
1433 | dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0); | ||
1434 | if (dirty == 0 && !unwritten) | ||
1435 | goto free_buffers; | ||
1436 | return 0; | ||
1437 | |||
1438 | free_buffers: | ||
1439 | return try_to_free_buffers(page); | 1291 | return try_to_free_buffers(page); |
1440 | } | 1292 | } |
1441 | 1293 | ||
@@ -1445,13 +1297,14 @@ __xfs_get_blocks( | |||
1445 | sector_t iblock, | 1297 | sector_t iblock, |
1446 | struct buffer_head *bh_result, | 1298 | struct buffer_head *bh_result, |
1447 | int create, | 1299 | int create, |
1448 | int direct, | 1300 | int direct) |
1449 | bmapi_flags_t flags) | ||
1450 | { | 1301 | { |
1451 | xfs_iomap_t iomap; | 1302 | int flags = create ? BMAPI_WRITE : BMAPI_READ; |
1303 | struct xfs_bmbt_irec imap; | ||
1452 | xfs_off_t offset; | 1304 | xfs_off_t offset; |
1453 | ssize_t size; | 1305 | ssize_t size; |
1454 | int niomap = 1; | 1306 | int nimap = 1; |
1307 | int new = 0; | ||
1455 | int error; | 1308 | int error; |
1456 | 1309 | ||
1457 | offset = (xfs_off_t)iblock << inode->i_blkbits; | 1310 | offset = (xfs_off_t)iblock << inode->i_blkbits; |
@@ -1461,23 +1314,25 @@ __xfs_get_blocks( | |||
1461 | if (!create && direct && offset >= i_size_read(inode)) | 1314 | if (!create && direct && offset >= i_size_read(inode)) |
1462 | return 0; | 1315 | return 0; |
1463 | 1316 | ||
1464 | error = xfs_iomap(XFS_I(inode), offset, size, | 1317 | if (direct && create) |
1465 | create ? flags : BMAPI_READ, &iomap, &niomap); | 1318 | flags |= BMAPI_DIRECT; |
1319 | |||
1320 | error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap, | ||
1321 | &new); | ||
1466 | if (error) | 1322 | if (error) |
1467 | return -error; | 1323 | return -error; |
1468 | if (niomap == 0) | 1324 | if (nimap == 0) |
1469 | return 0; | 1325 | return 0; |
1470 | 1326 | ||
1471 | if (iomap.iomap_bn != IOMAP_DADDR_NULL) { | 1327 | if (imap.br_startblock != HOLESTARTBLOCK && |
1328 | imap.br_startblock != DELAYSTARTBLOCK) { | ||
1472 | /* | 1329 | /* |
1473 | * For unwritten extents do not report a disk address on | 1330 | * For unwritten extents do not report a disk address on |
1474 | * the read case (treat as if we're reading into a hole). | 1331 | * the read case (treat as if we're reading into a hole). |
1475 | */ | 1332 | */ |
1476 | if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) { | 1333 | if (create || !ISUNWRITTEN(&imap)) |
1477 | xfs_map_buffer(bh_result, &iomap, offset, | 1334 | xfs_map_buffer(inode, bh_result, &imap, offset); |
1478 | inode->i_blkbits); | 1335 | if (create && ISUNWRITTEN(&imap)) { |
1479 | } | ||
1480 | if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) { | ||
1481 | if (direct) | 1336 | if (direct) |
1482 | bh_result->b_private = inode; | 1337 | bh_result->b_private = inode; |
1483 | set_buffer_unwritten(bh_result); | 1338 | set_buffer_unwritten(bh_result); |
@@ -1488,7 +1343,7 @@ __xfs_get_blocks( | |||
1488 | * If this is a realtime file, data may be on a different device. | 1343 | * If this is a realtime file, data may be on a different device. |
1489 | * to that pointed to from the buffer_head b_bdev currently. | 1344 | * to that pointed to from the buffer_head b_bdev currently. |
1490 | */ | 1345 | */ |
1491 | bh_result->b_bdev = iomap.iomap_target->bt_bdev; | 1346 | bh_result->b_bdev = xfs_find_bdev_for_inode(inode); |
1492 | 1347 | ||
1493 | /* | 1348 | /* |
1494 | * If we previously allocated a block out beyond eof and we are now | 1349 | * If we previously allocated a block out beyond eof and we are now |
@@ -1502,10 +1357,10 @@ __xfs_get_blocks( | |||
1502 | if (create && | 1357 | if (create && |
1503 | ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || | 1358 | ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || |
1504 | (offset >= i_size_read(inode)) || | 1359 | (offset >= i_size_read(inode)) || |
1505 | (iomap.iomap_flags & (IOMAP_NEW|IOMAP_UNWRITTEN)))) | 1360 | (new || ISUNWRITTEN(&imap)))) |
1506 | set_buffer_new(bh_result); | 1361 | set_buffer_new(bh_result); |
1507 | 1362 | ||
1508 | if (iomap.iomap_flags & IOMAP_DELAY) { | 1363 | if (imap.br_startblock == DELAYSTARTBLOCK) { |
1509 | BUG_ON(direct); | 1364 | BUG_ON(direct); |
1510 | if (create) { | 1365 | if (create) { |
1511 | set_buffer_uptodate(bh_result); | 1366 | set_buffer_uptodate(bh_result); |
@@ -1514,11 +1369,23 @@ __xfs_get_blocks( | |||
1514 | } | 1369 | } |
1515 | } | 1370 | } |
1516 | 1371 | ||
1372 | /* | ||
1373 | * If this is O_DIRECT or the mpage code calling tell them how large | ||
1374 | * the mapping is, so that we can avoid repeated get_blocks calls. | ||
1375 | */ | ||
1517 | if (direct || size > (1 << inode->i_blkbits)) { | 1376 | if (direct || size > (1 << inode->i_blkbits)) { |
1518 | ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0); | 1377 | xfs_off_t mapping_size; |
1519 | offset = min_t(xfs_off_t, | 1378 | |
1520 | iomap.iomap_bsize - iomap.iomap_delta, size); | 1379 | mapping_size = imap.br_startoff + imap.br_blockcount - iblock; |
1521 | bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset); | 1380 | mapping_size <<= inode->i_blkbits; |
1381 | |||
1382 | ASSERT(mapping_size > 0); | ||
1383 | if (mapping_size > size) | ||
1384 | mapping_size = size; | ||
1385 | if (mapping_size > LONG_MAX) | ||
1386 | mapping_size = LONG_MAX; | ||
1387 | |||
1388 | bh_result->b_size = mapping_size; | ||
1522 | } | 1389 | } |
1523 | 1390 | ||
1524 | return 0; | 1391 | return 0; |
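[Editor's note] For direct I/O and mpage callers the hunk above reports how much of the request the extent covers, so get_blocks does not have to be called once per block. A sketch of that clamping logic with simplified types (illustrative only, not the kernel code):

    #include <stdint.h>
    #include <limits.h>

    /* How many bytes, starting at iblock, the extent can map; capped at the
     * requested size and at LONG_MAX because b_size is a long-sized field. */
    static long sketch_mapping_size(uint64_t br_startoff,   /* extent start, fs blocks */
                                    uint64_t br_blockcount, /* extent length, fs blocks */
                                    uint64_t iblock,        /* requested block */
                                    uint64_t size,          /* requested bytes */
                                    unsigned int blkbits)
    {
            uint64_t mapping_size =
                    (br_startoff + br_blockcount - iblock) << blkbits;

            if (mapping_size > size)
                    mapping_size = size;
            if (mapping_size > (uint64_t)LONG_MAX)
                    mapping_size = LONG_MAX;
            return (long)mapping_size;
    }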
@@ -1531,8 +1398,7 @@ xfs_get_blocks( | |||
1531 | struct buffer_head *bh_result, | 1398 | struct buffer_head *bh_result, |
1532 | int create) | 1399 | int create) |
1533 | { | 1400 | { |
1534 | return __xfs_get_blocks(inode, iblock, | 1401 | return __xfs_get_blocks(inode, iblock, bh_result, create, 0); |
1535 | bh_result, create, 0, BMAPI_WRITE); | ||
1536 | } | 1402 | } |
1537 | 1403 | ||
1538 | STATIC int | 1404 | STATIC int |
@@ -1542,61 +1408,59 @@ xfs_get_blocks_direct( | |||
1542 | struct buffer_head *bh_result, | 1408 | struct buffer_head *bh_result, |
1543 | int create) | 1409 | int create) |
1544 | { | 1410 | { |
1545 | return __xfs_get_blocks(inode, iblock, | 1411 | return __xfs_get_blocks(inode, iblock, bh_result, create, 1); |
1546 | bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); | ||
1547 | } | 1412 | } |
1548 | 1413 | ||
1414 | /* | ||
1415 | * Complete a direct I/O write request. | ||
1416 | * | ||
1417 | * If the private argument is non-NULL __xfs_get_blocks signals us that we | ||
1418 | * need to issue a transaction to convert the range from unwritten to written | ||
1419 | * extents. In case this is regular synchronous I/O we just call xfs_end_io | ||
1420 | * to do this and we are done. But in case this was a successfull AIO | ||
1421 | * request this handler is called from interrupt context, from which we | ||
1422 | * can't start transactions. In that case offload the I/O completion to | ||
1423 | * the workqueues we also use for buffered I/O completion. | ||
1424 | */ | ||
1549 | STATIC void | 1425 | STATIC void |
1550 | xfs_end_io_direct( | 1426 | xfs_end_io_direct_write( |
1551 | struct kiocb *iocb, | 1427 | struct kiocb *iocb, |
1552 | loff_t offset, | 1428 | loff_t offset, |
1553 | ssize_t size, | 1429 | ssize_t size, |
1554 | void *private) | 1430 | void *private, |
1431 | int ret, | ||
1432 | bool is_async) | ||
1555 | { | 1433 | { |
1556 | xfs_ioend_t *ioend = iocb->private; | 1434 | struct xfs_ioend *ioend = iocb->private; |
1557 | 1435 | ||
1558 | /* | 1436 | /* |
1559 | * Non-NULL private data means we need to issue a transaction to | 1437 | * blockdev_direct_IO can return an error even after the I/O |
1560 | * convert a range from unwritten to written extents. This needs | 1438 | * completion handler was called. Thus we need to protect |
1561 | * to happen from process context but aio+dio I/O completion | 1439 | * against double-freeing. |
1562 | * happens from irq context so we need to defer it to a workqueue. | ||
1563 | * This is not necessary for synchronous direct I/O, but we do | ||
1564 | * it anyway to keep the code uniform and simpler. | ||
1565 | * | ||
1566 | * Well, if only it were that simple. Because synchronous direct I/O | ||
1567 | * requires extent conversion to occur *before* we return to userspace, | ||
1568 | * we have to wait for extent conversion to complete. Look at the | ||
1569 | * iocb that has been passed to us to determine if this is AIO or | ||
1570 | * not. If it is synchronous, tell xfs_finish_ioend() to kick the | ||
1571 | * workqueue and wait for it to complete. | ||
1572 | * | ||
1573 | * The core direct I/O code might be changed to always call the | ||
1574 | * completion handler in the future, in which case all this can | ||
1575 | * go away. | ||
1576 | */ | 1440 | */ |
1441 | iocb->private = NULL; | ||
1442 | |||
1577 | ioend->io_offset = offset; | 1443 | ioend->io_offset = offset; |
1578 | ioend->io_size = size; | 1444 | ioend->io_size = size; |
1579 | if (ioend->io_type == IOMAP_READ) { | 1445 | if (private && size > 0) |
1580 | xfs_finish_ioend(ioend, 0); | 1446 | ioend->io_type = IO_UNWRITTEN; |
1581 | } else if (private && size > 0) { | 1447 | |
1582 | xfs_finish_ioend(ioend, is_sync_kiocb(iocb)); | 1448 | if (is_async) { |
1583 | } else { | ||
1584 | /* | 1449 | /* |
1585 | * A direct I/O write ioend starts it's life in unwritten | 1450 | * If we are converting an unwritten extent we need to delay |
1586 | * state in case they map an unwritten extent. This write | 1451 | * the AIO completion until after the unwrittent extent |
1587 | * didn't map an unwritten extent so switch it's completion | 1452 | * conversion has completed, otherwise do it ASAP. |
1588 | * handler. | ||
1589 | */ | 1453 | */ |
1590 | ioend->io_type = IOMAP_NEW; | 1454 | if (ioend->io_type == IO_UNWRITTEN) { |
1591 | xfs_finish_ioend(ioend, 0); | 1455 | ioend->io_iocb = iocb; |
1456 | ioend->io_result = ret; | ||
1457 | } else { | ||
1458 | aio_complete(iocb, ret, 0); | ||
1459 | } | ||
1460 | xfs_finish_ioend(ioend); | ||
1461 | } else { | ||
1462 | xfs_finish_ioend_sync(ioend); | ||
1592 | } | 1463 | } |
1593 | |||
1594 | /* | ||
1595 | * blockdev_direct_IO can return an error even after the I/O | ||
1596 | * completion handler was called. Thus we need to protect | ||
1597 | * against double-freeing. | ||
1598 | */ | ||
1599 | iocb->private = NULL; | ||
1600 | } | 1464 | } |
1601 | 1465 | ||
1602 | STATIC ssize_t | 1466 | STATIC ssize_t |
@@ -1607,26 +1471,45 @@ xfs_vm_direct_IO( | |||
1607 | loff_t offset, | 1471 | loff_t offset, |
1608 | unsigned long nr_segs) | 1472 | unsigned long nr_segs) |
1609 | { | 1473 | { |
1610 | struct file *file = iocb->ki_filp; | 1474 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
1611 | struct inode *inode = file->f_mapping->host; | 1475 | struct block_device *bdev = xfs_find_bdev_for_inode(inode); |
1612 | struct block_device *bdev; | 1476 | ssize_t ret; |
1613 | ssize_t ret; | ||
1614 | |||
1615 | bdev = xfs_find_bdev_for_inode(XFS_I(inode)); | ||
1616 | 1477 | ||
1617 | iocb->private = xfs_alloc_ioend(inode, rw == WRITE ? | 1478 | if (rw & WRITE) { |
1618 | IOMAP_UNWRITTEN : IOMAP_READ); | 1479 | iocb->private = xfs_alloc_ioend(inode, IO_NEW); |
1619 | 1480 | ||
1620 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov, | 1481 | ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, |
1482 | offset, nr_segs, | ||
1483 | xfs_get_blocks_direct, | ||
1484 | xfs_end_io_direct_write, NULL, 0); | ||
1485 | if (ret != -EIOCBQUEUED && iocb->private) | ||
1486 | xfs_destroy_ioend(iocb->private); | ||
1487 | } else { | ||
1488 | ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, | ||
1621 | offset, nr_segs, | 1489 | offset, nr_segs, |
1622 | xfs_get_blocks_direct, | 1490 | xfs_get_blocks_direct, |
1623 | xfs_end_io_direct); | 1491 | NULL, NULL, 0); |
1492 | } | ||
1624 | 1493 | ||
1625 | if (unlikely(ret != -EIOCBQUEUED && iocb->private)) | ||
1626 | xfs_destroy_ioend(iocb->private); | ||
1627 | return ret; | 1494 | return ret; |
1628 | } | 1495 | } |
1629 | 1496 | ||
1497 | STATIC void | ||
1498 | xfs_vm_write_failed( | ||
1499 | struct address_space *mapping, | ||
1500 | loff_t to) | ||
1501 | { | ||
1502 | struct inode *inode = mapping->host; | ||
1503 | |||
1504 | if (to > inode->i_size) { | ||
1505 | struct iattr ia = { | ||
1506 | .ia_valid = ATTR_SIZE | ATTR_FORCE, | ||
1507 | .ia_size = inode->i_size, | ||
1508 | }; | ||
1509 | xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK); | ||
1510 | } | ||
1511 | } | ||
1512 | |||
1630 | STATIC int | 1513 | STATIC int |
1631 | xfs_vm_write_begin( | 1514 | xfs_vm_write_begin( |
1632 | struct file *file, | 1515 | struct file *file, |
@@ -1637,9 +1520,31 @@ xfs_vm_write_begin( | |||
1637 | struct page **pagep, | 1520 | struct page **pagep, |
1638 | void **fsdata) | 1521 | void **fsdata) |
1639 | { | 1522 | { |
1640 | *pagep = NULL; | 1523 | int ret; |
1641 | return block_write_begin(file, mapping, pos, len, flags, pagep, fsdata, | 1524 | |
1642 | xfs_get_blocks); | 1525 | ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, |
1526 | pagep, xfs_get_blocks); | ||
1527 | if (unlikely(ret)) | ||
1528 | xfs_vm_write_failed(mapping, pos + len); | ||
1529 | return ret; | ||
1530 | } | ||
1531 | |||
1532 | STATIC int | ||
1533 | xfs_vm_write_end( | ||
1534 | struct file *file, | ||
1535 | struct address_space *mapping, | ||
1536 | loff_t pos, | ||
1537 | unsigned len, | ||
1538 | unsigned copied, | ||
1539 | struct page *page, | ||
1540 | void *fsdata) | ||
1541 | { | ||
1542 | int ret; | ||
1543 | |||
1544 | ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); | ||
1545 | if (unlikely(ret < len)) | ||
1546 | xfs_vm_write_failed(mapping, pos + len); | ||
1547 | return ret; | ||
1643 | } | 1548 | } |
1644 | 1549 | ||
1645 | STATIC sector_t | 1550 | STATIC sector_t |
@@ -1650,7 +1555,7 @@ xfs_vm_bmap( | |||
1650 | struct inode *inode = (struct inode *)mapping->host; | 1555 | struct inode *inode = (struct inode *)mapping->host; |
1651 | struct xfs_inode *ip = XFS_I(inode); | 1556 | struct xfs_inode *ip = XFS_I(inode); |
1652 | 1557 | ||
1653 | xfs_itrace_entry(XFS_I(inode)); | 1558 | trace_xfs_vm_bmap(XFS_I(inode)); |
1654 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 1559 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
1655 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); | 1560 | xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); |
1656 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 1561 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
@@ -1684,7 +1589,7 @@ const struct address_space_operations xfs_address_space_operations = { | |||
1684 | .releasepage = xfs_vm_releasepage, | 1589 | .releasepage = xfs_vm_releasepage, |
1685 | .invalidatepage = xfs_vm_invalidatepage, | 1590 | .invalidatepage = xfs_vm_invalidatepage, |
1686 | .write_begin = xfs_vm_write_begin, | 1591 | .write_begin = xfs_vm_write_begin, |
1687 | .write_end = generic_write_end, | 1592 | .write_end = xfs_vm_write_end, |
1688 | .bmap = xfs_vm_bmap, | 1593 | .bmap = xfs_vm_bmap, |
1689 | .direct_IO = xfs_vm_direct_IO, | 1594 | .direct_IO = xfs_vm_direct_IO, |
1690 | .migratepage = buffer_migrate_page, | 1595 | .migratepage = buffer_migrate_page, |