author    Christoph Hellwig <hch@infradead.org>  2010-12-10 03:42:22 -0500
committer Alex Elder <aelder@sgi.com>  2010-12-16 17:05:57 -0500
commit    ed1e7b7e484dfb64168755613d499f32a97409bd (patch)
tree      8f7a7d3feffd6652f1acf3b868bde6157c81c8b5
parent    8ff2957d581582890693affc09920108a67cb05d (diff)
xfs: remove xfs_probe_cluster
xfs_map_blocks always calls xfs_bmapi with the XFS_BMAPI_ENTIRE flag, which tells it not to cap the extent at the passed-in size, but to treat the size as a minimum to map. This means xfs_probe_cluster is entirely useless, as we'll always get the whole extent back anyway.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Alex Elder <aelder@sgi.com>
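To illustrate the reasoning above, here is a minimal, self-contained sketch (not kernel code; the names toy_extent and toy_map_entire are hypothetical) of why probing for a larger length is redundant when the block mapper treats the requested length only as a minimum, which is what the XFS_BMAPI_ENTIRE flag asks for:

/*
 * Illustrative sketch only, under the assumption that "entire" semantics
 * mean: the returned mapping always covers the whole on-disk extent that
 * contains the requested offset, regardless of the requested length.
 */
#include <stdio.h>

struct toy_extent {
	long start;	/* first block of the on-disk extent */
	long len;	/* length of the extent in blocks */
};

/*
 * The caller's length is only a minimum: the whole extent containing
 * 'offset' comes back, so asking for one block or for a probed cluster
 * size yields exactly the same mapping.
 */
static struct toy_extent toy_map_entire(struct toy_extent disk, long offset,
					long min_len)
{
	(void)offset;
	(void)min_len;		/* length does not cap the mapping */
	return disk;		/* whole extent is returned regardless */
}

int main(void)
{
	struct toy_extent disk = { .start = 100, .len = 64 };
	struct toy_extent a = toy_map_entire(disk, 100, 1);	/* one block */
	struct toy_extent b = toy_map_entire(disk, 100, 48);	/* "probed" size */

	printf("min_len=1  -> start=%ld len=%ld\n", a.start, a.len);
	printf("min_len=48 -> start=%ld len=%ld\n", b.start, b.len);
	return 0;
}

Both calls return the identical mapping, which is why the probing pass adds work without changing the result.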
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 111
1 file changed, 4 insertions(+), 107 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 1252a8443429..c3bc7690f043 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -304,13 +304,13 @@ STATIC int
 xfs_map_blocks(
 	struct inode		*inode,
 	loff_t			offset,
-	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
 	int			type,
 	int			nonblocking)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			count = 1 << inode->i_blkbits;
 	xfs_fileoff_t		offset_fsb, end_fsb;
 	int			error = 0;
 	int			bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -635,108 +635,6 @@ xfs_map_at_offset(
 }
 
 /*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
-	struct page		*page,
-	unsigned int		pg_offset)
-{
-	struct buffer_head	*bh, *head;
-	int			ret = 0;
-
-	if (PageWriteback(page))
-		return 0;
-	if (!PageDirty(page))
-		return 0;
-	if (!page->mapping)
-		return 0;
-	if (!page_has_buffers(page))
-		return 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (!buffer_uptodate(bh))
-			break;
-		if (!buffer_mapped(bh))
-			break;
-		ret += bh->b_size;
-		if (ret >= pg_offset)
-			break;
-	} while ((bh = bh->b_this_page) != head);
-
-	return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
-	struct inode		*inode,
-	struct page		*startpage,
-	struct buffer_head	*bh,
-	struct buffer_head	*head)
-{
-	struct pagevec		pvec;
-	pgoff_t			tindex, tlast, tloff;
-	size_t			total = 0;
-	int			done = 0, i;
-
-	/* First sum forwards in this page */
-	do {
-		if (!buffer_uptodate(bh) || !buffer_mapped(bh))
-			return total;
-		total += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
-
-	/* if we reached the end of the page, sum forwards in following pages */
-	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	tindex = startpage->index + 1;
-
-	/* Prune this back to avoid pathological behavior */
-	tloff = min(tlast, startpage->index + 64);
-
-	pagevec_init(&pvec, 0);
-	while (!done && tindex <= tloff) {
-		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
-		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page	*page = pvec.pages[i];
-			size_t		pg_offset, pg_len = 0;
-
-			if (tindex == tlast) {
-				pg_offset =
-				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-				if (!pg_offset) {
-					done = 1;
-					break;
-				}
-			} else
-				pg_offset = PAGE_CACHE_SIZE;
-
-			if (page->index == tindex && trylock_page(page)) {
-				pg_len = xfs_probe_page(page, pg_offset);
-				unlock_page(page);
-			}
-
-			if (!pg_len) {
-				done = 1;
-				break;
-			}
-
-			total += pg_len;
-			tindex++;
-		}
-
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	return total;
-}
-
-/*
  * Test if a given page is suitable for writing as part of an unwritten
  * or delayed allocate extent.
  */
@@ -1028,7 +926,7 @@ xfs_vm_writepage(
 	unsigned int		type;
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index;
-	ssize_t			size, len;
+	ssize_t			len;
 	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
 	int			all_bh = 0;
@@ -1133,7 +1031,7 @@ xfs_vm_writepage(
			 * for unwritten extent conversion.
			 */
			new_ioend = 1;
-			err = xfs_map_blocks(inode, offset, len, &imap,
+			err = xfs_map_blocks(inode, offset, &imap,
					     type, nonblocking);
			if (err)
				goto error;
@@ -1158,8 +1056,7 @@ xfs_vm_writepage(
 		}
 		if (!imap_valid) {
 			new_ioend = 1;
-			size = xfs_probe_cluster(inode, page, bh, head);
-			err = xfs_map_blocks(inode, offset, size,
+			err = xfs_map_blocks(inode, offset,
 					     &imap, type, nonblocking);
 			if (err)
 				goto error;