path: root/fs/xfs/linux-2.6/xfs_aops.c
author    Anton Altaparmakov <aia21@cantab.net>  2005-05-21 17:00:02 -0400
committer Anton Altaparmakov <aia21@cantab.net>  2005-05-21 17:00:02 -0400
commit    67394f8f069c2fdf90f3b6d851824c07815442af (patch)
tree      0c33e62d34dbaecea434ae9ece3cc0c56db8b1f7 /fs/xfs/linux-2.6/xfs_aops.c
parent    450cbfbbbd88876e3ccec1d277f613221ca82bb7 (diff)
parent    9636273dae265b9354b861b373cd43cd76a6d0fe (diff)
Merge with /usr/src/ntfs-2.6.git
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  81
1 file changed, 46 insertions, 35 deletions
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 76a84758073a..9278e9aba9ba 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -558,7 +558,8 @@ xfs_submit_page(
 	int			i;
 
 	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
+	if (bh_count)
+		set_page_writeback(page);
 	if (clear_dirty)
 		clear_page_dirty(page);
 	unlock_page(page);
@@ -578,9 +579,6 @@ xfs_submit_page(
 
 		if (probed_page && clear_dirty)
 			wbc->nr_to_write--;	/* Wrote an "extra" page */
-	} else {
-		end_page_writeback(page);
-		wbc->pages_skipped++;	/* We didn't write this page */
 	}
 }
 
@@ -602,21 +600,26 @@ xfs_convert_page(
 {
 	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
 	xfs_iomap_t		*mp = iomapp, *tmp;
-	unsigned long		end, offset;
-	pgoff_t			end_index;
-	int			i = 0, index = 0;
+	unsigned long		offset, end_offset;
+	int			index = 0;
 	int			bbits = inode->i_blkbits;
+	int			len, page_dirty;
 
-	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	if (page->index < end_index) {
-		end = PAGE_CACHE_SIZE;
-	} else {
-		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
-	}
+	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+
+	/*
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decrememted as we move each into a cleanable state.
+	 */
+	len = 1 << inode->i_blkbits;
+	end_offset = max(end_offset, PAGE_CACHE_SIZE);
+	end_offset = roundup(end_offset, len);
+	page_dirty = end_offset / len;
+
+	offset = 0;
 	bh = head = page_buffers(page);
 	do {
-		offset = i << bbits;
-		if (offset >= end)
+		if (offset >= end_offset)
 			break;
 		if (!(PageUptodate(page) || buffer_uptodate(bh)))
 			continue;
@@ -625,6 +628,7 @@ xfs_convert_page(
 			if (startio) {
 				lock_buffer(bh);
 				bh_arr[index++] = bh;
+				page_dirty--;
 			}
 			continue;
 		}
@@ -657,10 +661,11 @@ xfs_convert_page(
 			unlock_buffer(bh);
 			mark_buffer_dirty(bh);
 		}
-	} while (i++, (bh = bh->b_this_page) != head);
+		page_dirty--;
+	} while (offset += len, (bh = bh->b_this_page) != head);
 
-	if (startio) {
-		xfs_submit_page(page, wbc, bh_arr, index, 1, index == i);
+	if (startio && index) {
+		xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
 	} else {
 		unlock_page(page);
 	}
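
The xfs_convert_page() hunks above replace the old end/i bookkeeping with a byte-offset walk and a page_dirty count of buffers before EOF. A minimal userspace sketch of that accounting follows; SKETCH_PAGE_SIZE, SKETCH_BLKBITS, roundup_to() and the example file size are illustrative stand-ins rather than the kernel's PAGE_CACHE_SIZE, i_blkbits or roundup(), and the full-page case is handled here with an explicit check rather than the max() clamp used in the hunk.

/* Userspace sketch only: count block-sized buffers before EOF on a page. */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096UL	/* stand-in for PAGE_CACHE_SIZE */
#define SKETCH_BLKBITS		9	/* stand-in for inode->i_blkbits (512-byte blocks) */

static unsigned long roundup_to(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	unsigned long isize = 3 * SKETCH_PAGE_SIZE + 1300;	/* hypothetical file size */
	unsigned long len = 1UL << SKETCH_BLKBITS;
	unsigned long end_offset = isize & (SKETCH_PAGE_SIZE - 1);
	unsigned long page_dirty;

	if (end_offset == 0)		/* file ends on a page boundary: whole page counts */
		end_offset = SKETCH_PAGE_SIZE;
	end_offset = roundup_to(end_offset, len);
	page_dirty = end_offset / len;	/* buffers to account for on the last page */

	printf("page_dirty starts at %lu\n", page_dirty);
	return 0;
}
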
@@ -725,8 +730,11 @@ xfs_page_state_convert(
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index, tlast;
 	int			len, err, i, cnt = 0, uptodate = 1;
-	int			flags = startio ? 0 : BMAPI_TRYLOCK;
-	int			page_dirty, delalloc = 0;
+	int			flags;
+	int			page_dirty;
+
+	/* wait for other IO threads? */
+	flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;
 
 	/* Is this page beyond the end of the file? */
 	offset = i_size_read(inode);
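
The reworked flags assignment above drops the unconditional blocking for startio callers: waiting on the mapping is now reserved for a real writeout in a synchronous writeback pass, while background (WB_SYNC_NONE) writeback keeps BMAPI_TRYLOCK and can skip the page. A hedged sketch of that decision, using stand-in names instead of the kernel's writeback_control fields and BMAPI flag values:

/* Sketch only: choose between blocking and trylock behaviour for the mapping call. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { SKETCH_WB_SYNC_NONE, SKETCH_WB_SYNC_ALL } sketch_sync_mode_t;

#define SKETCH_BMAPI_TRYLOCK	0x01	/* stand-in flag, not the kernel value */

static int mapping_flags(bool startio, sketch_sync_mode_t sync_mode)
{
	/* Block (no trylock) only for an actual writeout in a synchronous pass. */
	return (startio && sync_mode != SKETCH_WB_SYNC_NONE) ? 0 : SKETCH_BMAPI_TRYLOCK;
}

int main(void)
{
	printf("background writeback -> flags 0x%x\n",
	       mapping_flags(true, SKETCH_WB_SYNC_NONE));
	printf("sync writeback       -> flags 0x%x\n",
	       mapping_flags(true, SKETCH_WB_SYNC_ALL));
	return 0;
}
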
@@ -740,19 +748,22 @@ xfs_page_state_convert(
 		}
 	}
 
-	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	end_offset = min_t(unsigned long long,
-			offset + PAGE_CACHE_SIZE, i_size_read(inode));
-
-	bh = head = page_buffers(page);
-	iomp = NULL;
+			(loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
+	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
 
 	/*
-	 * page_dirty is initially a count of buffers on the page and
-	 * is decrememted as we move each into a cleanable state.
+	 * page_dirty is initially a count of buffers on the page before
+	 * EOF and is decrememted as we move each into a cleanable state.
 	 */
-	len = bh->b_size;
-	page_dirty = PAGE_CACHE_SIZE / len;
+	len = 1 << inode->i_blkbits;
+	p_offset = max(p_offset, PAGE_CACHE_SIZE);
+	p_offset = roundup(p_offset, len);
+	page_dirty = p_offset / len;
+
+	iomp = NULL;
+	p_offset = 0;
+	bh = head = page_buffers(page);
 
 	do {
 		if (offset >= end_offset)
@@ -804,7 +815,6 @@ xfs_page_state_convert(
 			 */
 		} else if (buffer_delay(bh)) {
 			if (!iomp) {
-				delalloc = 1;
 				err = xfs_map_blocks(inode, offset, len, &iomap,
 						BMAPI_ALLOCATE | flags);
 				if (err) {
@@ -875,14 +885,15 @@ xfs_page_state_convert(
 	if (uptodate && bh == head)
 		SetPageUptodate(page);
 
-	if (startio)
-		xfs_submit_page(page, wbc, bh_arr, cnt, 0, 1);
+	if (startio) {
+		WARN_ON(page_dirty);
+		xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
+	}
 
 	if (iomp) {
-		tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
 					PAGE_CACHE_SHIFT;
-		if (delalloc && (tlast > last_index))
-			tlast = last_index;
+		tlast = min_t(pgoff_t, offset, last_index);
 		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
 				startio, unmapped, tlast);
 	}
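
The final hunk makes the cluster-write clamp unconditional: the extra pages written after this one extend to the page covering the end of the returned mapping, but never past the last page of the file, rather than clamping only in the delalloc case. A small arithmetic sketch with hypothetical sizes follows; it is not kernel code, and last_index is derived differently in the function itself.

/* Sketch only: bound the clustered writeout by the mapping end and by EOF. */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12	/* stand-in for PAGE_CACHE_SHIFT (4K pages) */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long long iomap_offset = 8192, iomap_bsize = 65536;	/* hypothetical mapping */
	unsigned long long isize = 50000;				/* hypothetical i_size */

	unsigned long map_last = (unsigned long)
		((iomap_offset + iomap_bsize - 1) >> SKETCH_PAGE_SHIFT);
	unsigned long last_index = (unsigned long)((isize - 1) >> SKETCH_PAGE_SHIFT);
	unsigned long tlast = min_ul(map_last, last_index);	/* clamp to EOF */

	printf("cluster write stops at page index %lu\n", tlast);
	return 0;
}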