author		Dave Chinner <dchinner@redhat.com>	2010-03-04 21:00:42 -0500
committer	Alex Elder <aelder@sgi.com>		2010-03-05 12:01:53 -0500
commit		3ed3a4343b79a79d10e31f85f2d1afabcead76c6 (patch)
tree		9d3982cfb179bff8071ef99f2e2b36e3872dfa6a /fs/xfs/linux-2.6
parent		20f6b2c785cf187445f126321638ab8ba7aa7494 (diff)
xfs: truncate delalloc extents when IO fails in writeback
We currently use block_invalidatepage() to clean up pages where I/O fails in ->writepage(). Unfortunately, if the page has delalloc regions on it, we fail to remove the delalloc regions when we invalidate the page. This can result in tripping a BUG() in xfs_get_blocks() later on if a direct IO read is done on that same region - the delalloc extent is returned when none is supposed to be there.

Fix this by truncating away the delalloc regions on the page before invalidating it. Because they are delalloc, we can do this without needing a transaction. Indeed - if we get ENOSPC errors, we have to be able to do this truncation without a transaction as there is no space left for block reservation (typically why we see an ENOSPC in writeback).

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
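In outline, the new error path walks the failed page's buffer heads, looks up each delayed-allocation buffer with xfs_bmapi() to confirm the extent is still delalloc, punches it out with xfs_bunmapi() (delalloc blocks can be freed without a transaction), and only then invalidates the page. A condensed sketch of that loop, taken from the xfs_aops_discard_page() function added in the diff below - declarations, logging and the error bail-out paths are omitted, so treat it as an outline rather than compilable code:

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		if (!buffer_delay(bh))
			goto next_buffer;
		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);

		/* map the range first; only punch it if it is still delalloc */
		error = xfs_bmapi(NULL, ip, offset_fsb, 1, XFS_BMAPI_ENTIRE,
				  NULL, 0, &imap, &nimaps, NULL, NULL);
		if (error || !nimaps || imap.br_startblock != DELAYSTARTBLOCK)
			goto next_buffer;	/* error handling simplified here */

		/* delalloc extents can be removed without a transaction */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
				    &flist, NULL, &done);
next_buffer:
		offset += len;
	} while ((bh = bh->b_this_page) != head);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_vm_invalidatepage(page, 0);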
Diffstat (limited to 'fs/xfs/linux-2.6')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	124
1 file changed, 114 insertions(+), 10 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 793908a8a09b..9083357f9e44 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -39,6 +39,7 @@
 #include "xfs_iomap.h"
 #include "xfs_vnodeops.h"
 #include "xfs_trace.h"
+#include "xfs_bmap.h"
 #include <linux/mpage.h>
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
@@ -893,6 +894,118 @@ xfs_cluster_write(
 	}
 }
 
+STATIC void
+xfs_vm_invalidatepage(
+	struct page		*page,
+	unsigned long		offset)
+{
+	trace_xfs_invalidatepage(page->mapping->host, page, offset);
+	block_invalidatepage(page, offset);
+}
+
+/*
+ * If the page has delalloc buffers on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
+ * is done on that same region - the delalloc extent is returned when none is
+ * supposed to be there.
+ *
+ * We prevent this by truncating away the delalloc regions on the page before
+ * invalidating it. Because they are delalloc, we can do this without needing a
+ * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
+ * truncation without a transaction as there is no space left for block
+ * reservation (typically why we see an ENOSPC in writeback).
+ *
+ * This is not a performance critical path, so for now just do the punching a
+ * buffer head at a time.
+ */
+STATIC void
+xfs_aops_discard_page(
+	struct page		*page)
+{
+	struct inode		*inode = page->mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct buffer_head	*bh, *head;
+	loff_t			offset = page_offset(page);
+	ssize_t			len = 1 << inode->i_blkbits;
+
+	if (!xfs_is_delayed_page(page, IOMAP_DELAY))
+		goto out_invalidate;
+
+	xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+		"page discard on page %p, inode 0x%llx, offset %llu.",
+			page, ip->i_ino, offset);
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	bh = head = page_buffers(page);
+	do {
+		int		done;
+		xfs_fileoff_t	offset_fsb;
+		xfs_bmbt_irec_t	imap;
+		int		nimaps = 1;
+		int		error;
+		xfs_fsblock_t	firstblock;
+		xfs_bmap_free_t	flist;
+
+		if (!buffer_delay(bh))
+			goto next_buffer;
+
+		offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+
+		/*
+		 * Map the range first and check that it is a delalloc extent
+		 * before trying to unmap the range. Otherwise we will be
+		 * trying to remove a real extent (which requires a
+		 * transaction) or a hole, which is probably a bad idea...
+		 */
+		error = xfs_bmapi(NULL, ip, offset_fsb, 1,
+				XFS_BMAPI_ENTIRE, NULL, 0, &imap,
+				&nimaps, NULL, NULL);
+
+		if (error) {
+			/* something screwed, just bail */
+			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard failed delalloc mapping lookup.");
+			break;
+		}
+		if (!nimaps) {
+			/* nothing there */
+			goto next_buffer;
+		}
+		if (imap.br_startblock != DELAYSTARTBLOCK) {
+			/* been converted, ignore */
+			goto next_buffer;
+		}
+		WARN_ON(imap.br_blockcount == 0);
+
+		/*
+		 * Note: while we initialise the firstblock/flist pair, they
+		 * should never be used because blocks should never be
+		 * allocated or freed for a delalloc extent and hence we don't
+		 * need to cancel or finish them after the xfs_bunmapi() call.
+		 */
+		xfs_bmap_init(&flist, &firstblock);
+		error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
+					&flist, NULL, &done);
+
+		ASSERT(!flist.xbf_count && !flist.xbf_first);
+		if (error) {
+			/* something screwed, just bail */
+			xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
+			"page discard unable to remove delalloc mapping.");
+			break;
+		}
+next_buffer:
+		offset += len;
+
+	} while ((bh = bh->b_this_page) != head);
+
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_invalidate:
+	xfs_vm_invalidatepage(page, 0);
+	return;
+}
+
 /*
  * Calling this without startio set means we are being asked to make a dirty
  * page ready for freeing it's buffers. When called with startio set then
@@ -1144,7 +1257,7 @@ error:
 	 */
 	if (err != -EAGAIN) {
 		if (!unmapped)
-			block_invalidatepage(page, 0);
+			xfs_aops_discard_page(page);
 		ClearPageUptodate(page);
 	}
 	return err;
@@ -1554,15 +1667,6 @@ xfs_vm_readpages(
 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
-STATIC void
-xfs_vm_invalidatepage(
-	struct page		*page,
-	unsigned long		offset)
-{
-	trace_xfs_invalidatepage(page->mapping->host, page, offset);
-	block_invalidatepage(page, offset);
-}
-
 const struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,