aboutsummaryrefslogtreecommitdiffstats
path: root/fs/fuse/file.c
diff options
context:
space:
mode:
authorMaxim Patlasov <mpatlasov@parallels.com>2012-12-14 10:21:08 -0500
committerMiklos Szeredi <mszeredi@suse.cz>2013-04-17 15:50:59 -0400
commitbcba24ccdc82f7415154cf87226c2577cea13a5c (patch)
tree10e9c4e70f82cab86bd3790cd7ccb6adccf5aff0 /fs/fuse/file.c
parent36cf66ed9f871fc0d0911921fba5873df3ddb2dc (diff)
fuse: enable asynchronous processing direct IO
In case of a synchronous DIO request (i.e. read(2) or write(2) on a file opened with O_DIRECT), the patch submits fuse requests asynchronously, but waits for their completions before returning from fuse_direct_IO(). In case of an asynchronous DIO request (i.e. one submitted via libaio's io_submit(2) on a file opened with O_DIRECT), the patch submits fuse requests asynchronously and returns -EIOCBQUEUED immediately. The only special case is an async DIO request extending the file. Here the patch falls back to the old behaviour because we can't return -EIOCBQUEUED and update i_size later, without i_mutex held. And we have no method to wait on real async I/O requests. The patch also cleans __fuse_direct_write() up: it's better to update i_size in its callers. Thanks Brian for the suggestion. Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com> Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Diffstat (limited to 'fs/fuse/file.c')
-rw-r--r--fs/fuse/file.c51
1 files changed, 44 insertions, 7 deletions
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e207dcdf32c0..ba1d50369c24 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1366,11 +1366,8 @@ static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
1366 ssize_t res; 1366 ssize_t res;
1367 1367
1368 res = generic_write_checks(file, ppos, &count, 0); 1368 res = generic_write_checks(file, ppos, &count, 0);
1369 if (!res) { 1369 if (!res)
1370 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1); 1370 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
1371 if (!io->async && res > 0)
1372 fuse_write_update_size(inode, *ppos);
1373 }
1374 1371
1375 fuse_invalidate_attr(inode); 1372 fuse_invalidate_attr(inode);
1376 1373
@@ -1391,6 +1388,8 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1391 /* Don't allow parallel writes to the same file */ 1388 /* Don't allow parallel writes to the same file */
1392 mutex_lock(&inode->i_mutex); 1389 mutex_lock(&inode->i_mutex);
1393 res = __fuse_direct_write(&io, &iov, 1, ppos); 1390 res = __fuse_direct_write(&io, &iov, 1, ppos);
1391 if (res > 0)
1392 fuse_write_update_size(inode, *ppos);
1394 mutex_unlock(&inode->i_mutex); 1393 mutex_unlock(&inode->i_mutex);
1395 1394
1396 return res; 1395 return res;
@@ -2360,23 +2359,61 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2360 ssize_t ret = 0; 2359 ssize_t ret = 0;
2361 struct file *file = NULL; 2360 struct file *file = NULL;
2362 loff_t pos = 0; 2361 loff_t pos = 0;
2362 struct inode *inode;
2363 loff_t i_size;
2364 size_t count = iov_length(iov, nr_segs);
2363 struct fuse_io_priv *io; 2365 struct fuse_io_priv *io;
2364 2366
2365 file = iocb->ki_filp; 2367 file = iocb->ki_filp;
2366 pos = offset; 2368 pos = offset;
2369 inode = file->f_mapping->host;
2370 i_size = i_size_read(inode);
2367 2371
2368 io = kzalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); 2372 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2369 if (!io) 2373 if (!io)
2370 return -ENOMEM; 2374 return -ENOMEM;
2371 2375 spin_lock_init(&io->lock);
2376 io->reqs = 1;
2377 io->bytes = -1;
2378 io->size = 0;
2379 io->offset = offset;
2380 io->write = (rw == WRITE);
2381 io->err = 0;
2372 io->file = file; 2382 io->file = file;
2383 /*
2384 * By default, we want to optimize all I/Os with async request
2385 * submission to the client filesystem.
2386 */
2387 io->async = 1;
2388 io->iocb = iocb;
2389
2390 /*
2391 * We cannot asynchronously extend the size of a file. We have no method
2392 * to wait on real async I/O requests, so we must submit this request
2393 * synchronously.
2394 */
2395 if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE)
2396 io->async = false;
2373 2397
2374 if (rw == WRITE) 2398 if (rw == WRITE)
2375 ret = __fuse_direct_write(io, iov, nr_segs, &pos); 2399 ret = __fuse_direct_write(io, iov, nr_segs, &pos);
2376 else 2400 else
2377 ret = __fuse_direct_read(io, iov, nr_segs, &pos); 2401 ret = __fuse_direct_read(io, iov, nr_segs, &pos);
2378 2402
2379 kfree(io); 2403 if (io->async) {
2404 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2405
2406 /* we have a non-extending, async request, so return */
2407 if (ret > 0 && !is_sync_kiocb(iocb))
2408 return -EIOCBQUEUED;
2409
2410 ret = wait_on_sync_kiocb(iocb);
2411 } else {
2412 kfree(io);
2413 }
2414
2415 if (rw == WRITE && ret > 0)
2416 fuse_write_update_size(inode, pos);
2380 2417
2381 return ret; 2418 return ret;
2382} 2419}