author		Christoph Hellwig <hch@sgi.com>		2006-01-11 04:48:47 -0500
committer	Nathan Scott <nathans@sgi.com>		2006-01-11 04:48:47 -0500
commit		9260dc6b2ee011f728bae50edce11022567be096 (patch)
tree		e9422f63e7d15077feeaa6cc8e0a6a394e06cb85 /fs/xfs
parent		1defeac9d4fffa3eabc4fba887e8ff5b1da7f361 (diff)
[XFS] various fixes for xfs_convert_page

Fix various bogosities in handling offsets.

From David Chinner and Christoph Hellwig.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203826a
Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	89
1 file changed, 52 insertions(+), 37 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index b306e25f0f07..64c909e5c1e5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -624,12 +624,13 @@ xfs_convert_page(
 	int			all_bh)
 {
 	struct buffer_head	*bh, *head;
-	unsigned long		p_offset, end_offset;
+	xfs_off_t		end_offset;
+	unsigned long		p_offset;
 	unsigned int		type;
 	int			bbits = inode->i_blkbits;
 	int			len, page_dirty;
 	int			count = 0, done = 0, uptodate = 1;
-	xfs_off_t		f_offset = page_offset(page);
+	xfs_off_t		offset = page_offset(page);
 
 	if (page->index != tindex)
 		goto fail;
@@ -642,21 +643,33 @@ xfs_convert_page(
 	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
 		goto fail_unlock_page;
 
-	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
-
 	/*
 	 * page_dirty is initially a count of buffers on the page before
 	 * EOF and is decrememted as we move each into a cleanable state.
+	 *
+	 * Derivation:
+	 *
+	 * End offset is the highest offset that this page should represent.
+	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+	 * hence give us the correct page_dirty count. On any other page,
+	 * it will be zero and in that case we need page_dirty to be the
+	 * count of buffers on the page.
 	 */
+	end_offset = min_t(unsigned long long,
+			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+			i_size_read(inode));
+
 	len = 1 << inode->i_blkbits;
-	end_offset = max(end_offset, PAGE_CACHE_SIZE);
-	end_offset = roundup(end_offset, len);
-	page_dirty = end_offset / len;
+	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+					PAGE_CACHE_SIZE);
+	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+	page_dirty = p_offset / len;
 
 	p_offset = 0;
 	bh = head = page_buffers(page);
 	do {
-		if (p_offset >= end_offset)
+		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
 			uptodate = 0;
@@ -665,43 +678,45 @@ xfs_convert_page(
 			continue;
 		}
 
-		if (buffer_unwritten(bh))
-			type = IOMAP_UNWRITTEN;
-		else if (buffer_delay(bh))
-			type = IOMAP_DELAY;
-		else {
-			type = 0;
-			if (!(buffer_mapped(bh) && all_bh && startio)) {
-				done = 1;
-			} else if (startio) {
+		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+			if (buffer_unwritten(bh))
+				type = IOMAP_UNWRITTEN;
+			else
+				type = IOMAP_DELAY;
+
+			if (!xfs_iomap_valid(mp, offset)) {
+				done = 1;
+				continue;
+			}
+
+			ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+			ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+			xfs_map_at_offset(bh, offset, bbits, mp);
+			if (startio) {
+				xfs_add_to_ioend(inode, bh, p_offset,
+						type, ioendp, done);
+			} else {
+				set_buffer_dirty(bh);
+				unlock_buffer(bh);
+				mark_buffer_dirty(bh);
+			}
+			page_dirty--;
+			count++;
+		} else {
+			type = 0;
+			if (buffer_mapped(bh) && all_bh && startio) {
 				lock_buffer(bh);
 				xfs_add_to_ioend(inode, bh, p_offset,
 						type, ioendp, done);
 				count++;
 				page_dirty--;
+			} else {
+				done = 1;
 			}
-			continue;
-		}
-
-		if (!xfs_iomap_valid(mp, f_offset + p_offset)) {
-			done = 1;
-			continue;
-		}
-		ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
-		ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
-
-		xfs_map_at_offset(bh, f_offset + p_offset, bbits, mp);
-		if (startio) {
-			xfs_add_to_ioend(inode, bh, p_offset,
-					type, ioendp, done);
-			count++;
-		} else {
-			set_buffer_dirty(bh);
-			unlock_buffer(bh);
-			mark_buffer_dirty(bh);
 		}
-		page_dirty--;
-	} while (p_offset += len, (bh = bh->b_this_page) != head);
+	} while (offset += len, p_offset += len,
+			(bh = bh->b_this_page) != head);
 
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
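
For readers following the new page_dirty derivation in the comment above, here is a minimal userspace sketch of the same arithmetic, assuming 4KB pages and 512-byte blocks. PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, min_t and roundup are simplified stand-ins for the kernel macros, and page_dirty_count plus the sample i_size are hypothetical, illustration-only names and values, not part of the patch.

#include <stdio.h>

/* Illustrative stand-ins for the kernel macros referenced in the patch. */
#define PAGE_CACHE_SHIFT	12
#define PAGE_CACHE_SIZE		(1UL << PAGE_CACHE_SHIFT)	/* 4096 */
#define min_t(t, a, b)		((t)(a) < (t)(b) ? (t)(a) : (t)(b))
#define roundup(x, y)		((((x) + ((y) - 1)) / (y)) * (y))

/* Mirror the new end_offset/p_offset/page_dirty computation for one page. */
static unsigned long page_dirty_count(unsigned long page_index,
				      unsigned long long isize,
				      unsigned int blocksize)
{
	unsigned long long end_offset;
	unsigned long p_offset;

	/* Highest file offset this page should represent, capped at EOF. */
	end_offset = min_t(unsigned long long,
			(unsigned long long)(page_index + 1) << PAGE_CACHE_SHIFT,
			isize);

	/*
	 * On the last page, end_offset & (PAGE_CACHE_SIZE - 1) is the
	 * non-zero byte count inside the page; on any other page it is
	 * zero and the whole page of buffers counts.
	 */
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
			 PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, blocksize) : PAGE_CACHE_SIZE;

	return p_offset / blocksize;
}

int main(void)
{
	unsigned long long isize = 10000;	/* hypothetical i_size: 2 full pages + 1808 bytes */
	unsigned int blocksize = 512;		/* hypothetical 512-byte blocks, 8 per page */

	printf("page 0: %lu dirty buffers\n", page_dirty_count(0, isize, blocksize)); /* 8 */
	printf("page 2: %lu dirty buffers\n", page_dirty_count(2, isize, blocksize)); /* 4 */
	return 0;
}

With these sample numbers, an interior page (index 0) masks to zero, so page_dirty counts all eight buffers on the page; on the final page (index 2) the masked value is 1808, which rounds up to four 512-byte buffers.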