author		Christoph Hellwig <hch@sgi.com>		2005-09-04 18:22:52 -0400
committer	Nathan Scott <nathans@sgi.com>		2005-09-04 18:22:52 -0400
commit		f09738638d3bae6501e8e160c66233832d8c280f (patch)
tree		e205b44c883da5b9ba37741bf5763b5e85d2bcfa	/fs/xfs/linux-2.6/xfs_aops.c
parent		65b3da3705ff873d8704074a75ac983495863380 (diff)
[XFS] Delay direct I/O completion to a workqueue

This is necessary because aio+dio completions may happen from irq context,
but we need process context for converting unwritten extents.  We also queue
regular direct I/O completions to the workqueue for uniformity; there is only
one queue_work call per syscall.

SGI-PV: 934766
SGI-Modid: xfs-linux:xfs-kern:196857a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
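The pattern the message describes is the usual one for completion callbacks that can run in hard-irq context: they must not sleep, so any work that needs process context (taking blocking locks, starting a transaction to convert unwritten extents) is packaged into a work item and handed to a workqueue, which runs it later in a kernel thread.  Below is a minimal, self-contained sketch of that hand-off.  It is written against the current Linux workqueue API rather than the 2005-era one used by this commit (back then INIT_WORK took a data argument and there was no system_wq), and all demo_* names are invented for illustration; in the patch itself, xfs_finish_ioend plays roughly this queueing role for the ioend's work item.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical per-I/O completion state, analogous to xfs_ioend_t. */
struct demo_ioend {
	struct work_struct	io_work;	/* deferred completion work item */
	loff_t			io_offset;
	size_t			io_size;
};

/* Runs later in process context via a workqueue thread: here it is safe
 * to sleep, take blocking locks, and start transactions. */
static void demo_end_io_work(struct work_struct *work)
{
	struct demo_ioend *ioend =
		container_of(work, struct demo_ioend, io_work);

	/* ... do the blocking completion work, then release the state ... */
	kfree(ioend);
}

/* Called from the I/O completion path, possibly in irq context: do no
 * blocking work here, just queue the work item and return. */
static void demo_end_io(struct demo_ioend *ioend)
{
	INIT_WORK(&ioend->io_work, demo_end_io_work);
	queue_work(system_wq, &ioend->io_work);
}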
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_aops.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	74
1 file changed, 48 insertions(+), 26 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index ed98c7ac7cfd..2add9a8a8df7 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -126,7 +126,7 @@ xfs_destroy_ioend(
 
 /*
  * Issue transactions to convert a buffer range from unwritten
- * to written extents (buffered IO).
+ * to written extents.
  */
 STATIC void
 xfs_end_bio_unwritten(
@@ -191,29 +191,6 @@ linvfs_unwritten_done(
 	end_buffer_async_write(bh, uptodate);
 }
 
-/*
- * Issue transactions to convert a buffer range from unwritten
- * to written extents (direct IO).
- */
-STATIC void
-linvfs_unwritten_convert_direct(
-	struct kiocb	*iocb,
-	loff_t		offset,
-	ssize_t		size,
-	void		*private)
-{
-	struct inode	*inode = iocb->ki_filp->f_dentry->d_inode;
-	ASSERT(!private || inode == (struct inode *)private);
-
-	/* private indicates an unwritten extent lay beneath this IO */
-	if (private && size > 0) {
-		vnode_t	*vp = LINVFS_GET_VP(inode);
-		int	error;
-
-		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-	}
-}
-
 STATIC int
 xfs_map_blocks(
 	struct inode		*inode,
@@ -1045,6 +1022,44 @@ linvfs_get_blocks_direct(
 		create, 1, BMAPI_WRITE|BMAPI_DIRECT);
 }
 
+STATIC void
+linvfs_end_io_direct(
+	struct kiocb	*iocb,
+	loff_t		offset,
+	ssize_t		size,
+	void		*private)
+{
+	xfs_ioend_t	*ioend = iocb->private;
+
+	/*
+	 * Non-NULL private data means we need to issue a transaction to
+	 * convert a range from unwritten to written extents.  This needs
+	 * to happen from process context but aio+dio I/O completion
+	 * happens from irq context so we need to defer it to a workqueue.
+	 * This is not necessary for synchronous direct I/O, but we do
+	 * it anyway to keep the code uniform and simpler.
+	 *
+	 * The core direct I/O code might be changed to always call the
+	 * completion handler in the future, in which case all this can
+	 * go away.
+	 */
+	if (private && size > 0) {
+		ioend->io_offset = offset;
+		ioend->io_size = size;
+		xfs_finish_ioend(ioend);
+	} else {
+		ASSERT(size >= 0);
+		xfs_destroy_ioend(ioend);
+	}
+
+	/*
+	 * blockdev_direct_IO can return an error even after the I/O
+	 * completion handler was called.  Thus we need to protect
+	 * against double-freeing.
+	 */
+	iocb->private = NULL;
+}
+
 STATIC ssize_t
 linvfs_direct_IO(
 	int		rw,
@@ -1059,16 +1074,23 @@ linvfs_direct_IO(
 	xfs_iomap_t	iomap;
 	int		maps = 1;
 	int		error;
+	ssize_t		ret;
 
 	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
 	if (error)
 		return -error;
 
-	return blockdev_direct_IO_own_locking(rw, iocb, inode,
+	iocb->private = xfs_alloc_ioend(inode);
+
+	ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
 		iomap.iomap_target->pbr_bdev,
 		iov, offset, nr_segs,
 		linvfs_get_blocks_direct,
-		linvfs_unwritten_convert_direct);
+		linvfs_end_io_direct);
+
+	if (unlikely(ret <= 0 && iocb->private))
+		xfs_destroy_ioend(iocb->private);
+	return ret;
 }
 
 
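One subtlety in the last hunk is the lifetime handshake around iocb->private: the ioend is allocated before submission, the completion handler consumes it and clears iocb->private, and the submit path frees it only if the pointer is still set after an error return, because blockdev_direct_IO can report an error even after the completion handler has already run.  The user-space C sketch below isolates just that handshake; every name in it (dio_iocb, dio_ctx, submit_dio, dio_complete) is hypothetical, and the lower layer is simulated by a parameter rather than by real I/O.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-I/O context, standing in for xfs_ioend_t. */
struct dio_ctx {
	long long	offset;
	long		size;
};

/* Stand-in for the kiocb: carries the private completion context. */
struct dio_iocb {
	struct dio_ctx	*private;
};

/* Completion handler: consumes the context and clears the handle so the
 * submit path cannot free it a second time. */
static void dio_complete(struct dio_iocb *iocb, long long offset, long size)
{
	struct dio_ctx *ctx = iocb->private;

	if (size > 0) {
		ctx->offset = offset;	/* record what actually completed */
		ctx->size = size;
	}
	free(ctx);			/* or hand off for deferred processing */
	iocb->private = NULL;		/* guard against double-free */
}

/* Submit path: allocate the context up front; on an error return, free it
 * only if the completion handler did not already run. */
static long submit_dio(struct dio_iocb *iocb, long simulated_ret)
{
	long ret;

	iocb->private = calloc(1, sizeof(*iocb->private));
	if (!iocb->private)
		return -1;

	ret = simulated_ret;			/* pretend the lower layer ran */
	if (ret > 0)
		dio_complete(iocb, 0, ret);	/* lower layer calls this on success */

	if (ret <= 0 && iocb->private) {
		free(iocb->private);		/* completion never ran */
		iocb->private = NULL;
	}
	return ret;
}

int main(void)
{
	struct dio_iocb iocb = { 0 };

	printf("success path returned %ld\n", submit_dio(&iocb, 4096));
	printf("error path returned   %ld\n", submit_dio(&iocb, -5));
	return 0;
}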