author    Maxim Patlasov <mpatlasov@parallels.com>  2012-12-14 10:20:41 -0500
committer Miklos Szeredi <mszeredi@suse.cz>  2013-04-17 15:50:59 -0400
commit    01e9d11a3e79035ca5cd89b035435acd4ba61ee1 (patch)
tree      514830e4188f2799b91239cd669ef1629ce3e278 /fs/fuse
parent    187c5c36330bc8d15674d9e6d2a2412de6b1034d (diff)
fuse: add support of async IO
The patch implements a framework to process an IO request asynchronously. The
idea is to associate several fuse requests with a single kiocb by means of the
fuse_io_priv structure. The structure plays the same role for FUSE as
'struct dio' does for direct-io.c.

The framework is supposed to be used like this:

 - someone (who wants to process an IO asynchronously) allocates a
   fuse_io_priv and initializes it, setting the 'async' field to a non-zero
   value
 - as soon as a fuse request is filled, it can be submitted (in a
   non-blocking way) by fuse_async_req_send()
 - when all submitted requests have been ACKed by userspace, io->reqs drops
   to zero, triggering aio_complete()

In the case of IO initiated by libaio, aio_complete() finishes processing the
same way dio_complete() does when it calls aio_complete(). But the framework
may also be used internally by FUSE when the initial IO request was
synchronous (from the user's perspective) but it is beneficial to process it
asynchronously. In that case the caller should wait on the kiocb explicitly,
and aio_complete() will wake the caller up.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
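[Editor's note] This patch adds the framework only; the callers arrive in later patches of the series. As a rough sketch of how a caller might drive it, assuming a fuse request that has already been allocated and filled by the normal read/write path: example_submit_async() and its parameters are hypothetical, while struct fuse_io_priv and fuse_async_req_send() come from the diff below.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/aio.h>
#include "fuse_i.h"

/* Hypothetical caller: submit one already-filled request asynchronously. */
static int example_submit_async(struct fuse_conn *fc, struct fuse_req *req,
				struct kiocb *iocb, size_t num_bytes,
				loff_t offset, bool write)
{
	struct fuse_io_priv *io;

	io = kzalloc(sizeof(*io), GFP_KERNEL);	/* freed by fuse_aio_complete() */
	if (!io)
		return -ENOMEM;

	spin_lock_init(&io->lock);
	io->async  = 1;			/* mark the IO as asynchronous */
	io->bytes  = -1;		/* no short transfer seen yet */
	io->iocb   = iocb;
	io->file   = iocb->ki_filp;
	io->offset = offset;
	io->write  = write;

	/*
	 * fuse_async_req_send() accounts num_bytes in io->size, bumps
	 * io->reqs and queues the request in the background;
	 * fuse_aio_complete_req() runs when userspace answers, and
	 * aio_complete() fires once io->reqs reaches zero.
	 */
	fuse_async_req_send(fc, req, num_bytes, io);
	return 0;
}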
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/file.c    92
-rw-r--r--  fs/fuse/fuse_i.h  17
2 files changed, 109 insertions, 0 deletions
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 1ee2fc92bc3e..4002889fbcc1 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -506,6 +506,98 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
 	}
 }
 
+/**
+ * In case of short read, the caller sets 'pos' to the position of
+ * actual end of fuse request in IO request. Otherwise, if bytes_requested
+ * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
+ *
+ * An example:
+ * User requested DIO read of 64K. It was splitted into two 32K fuse requests,
+ * both submitted asynchronously. The first of them was ACKed by userspace as
+ * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
+ * second request was ACKed as short, e.g. only 1K was read, resulting in
+ * pos == 33K.
+ *
+ * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
+ * will be equal to the length of the longest contiguous fragment of
+ * transferred data starting from the beginning of IO request.
+ */
+static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+{
+	int left;
+
+	spin_lock(&io->lock);
+	if (err)
+		io->err = io->err ? : err;
+	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
+		io->bytes = pos;
+
+	left = --io->reqs;
+	spin_unlock(&io->lock);
+
+	if (!left) {
+		long res;
+
+		if (io->err)
+			res = io->err;
+		else if (io->bytes >= 0 && io->write)
+			res = -EIO;
+		else {
+			res = io->bytes < 0 ? io->size : io->bytes;
+
+			if (!is_sync_kiocb(io->iocb)) {
+				struct path *path = &io->iocb->ki_filp->f_path;
+				struct inode *inode = path->dentry->d_inode;
+				struct fuse_conn *fc = get_fuse_conn(inode);
+				struct fuse_inode *fi = get_fuse_inode(inode);
+
+				spin_lock(&fc->lock);
+				fi->attr_version = ++fc->attr_version;
+				spin_unlock(&fc->lock);
+			}
+		}
+
+		aio_complete(io->iocb, res, 0);
+		kfree(io);
+	}
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+{
+	struct fuse_io_priv *io = req->io;
+	ssize_t pos = -1;
+
+	fuse_release_user_pages(req, !io->write);
+
+	if (io->write) {
+		if (req->misc.write.in.size != req->misc.write.out.size)
+			pos = req->misc.write.in.offset - io->offset +
+				req->misc.write.out.size;
+	} else {
+		if (req->misc.read.in.size != req->out.args[0].size)
+			pos = req->misc.read.in.offset - io->offset +
+				req->out.args[0].size;
+	}
+
+	fuse_aio_complete(io, req->out.h.error, pos);
+}
+
+static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+		size_t num_bytes, struct fuse_io_priv *io)
+{
+	spin_lock(&io->lock);
+	io->size += num_bytes;
+	io->reqs++;
+	spin_unlock(&io->lock);
+
+	req->io = io;
+	req->end = fuse_aio_complete_req;
+
+	fuse_request_send_background(fc, req);
+
+	return num_bytes;
+}
+
 static size_t fuse_send_read(struct fuse_req *req, struct file *file,
 			     loff_t pos, size_t count, fl_owner_t owner)
 {
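[Editor's note] The comment above fuse_aio_complete() explains the short-read accounting by example; the rule it implements is "keep the minimal non-negative 'pos' reported by any request". A small user-space sketch of that accumulation, purely illustrative and not part of the patch, replaying the 64K/32K/1K numbers from the comment:

#include <stdio.h>
#include <sys/types.h>

/* Mirror of the accounting in fuse_aio_complete(): keep the minimal
 * non-negative 'pos'; a pos of -1 means "request fully transferred". */
static void account(ssize_t *bytes, ssize_t pos)
{
	if (pos >= 0 && (*bytes < 0 || pos < *bytes))
		*bytes = pos;
}

int main(void)
{
	ssize_t bytes = -1;		/* like io->bytes, starts at -1 */
	size_t size = 64 * 1024;	/* like io->size: total bytes submitted */

	account(&bytes, -1);		/* first 32K request: fully completed */
	account(&bytes, 33 * 1024);	/* second request short: only 1K read at offset 32K */

	/* prints 33792 (33K): the longest contiguous prefix actually transferred */
	printf("%zd\n", bytes < 0 ? (ssize_t)size : bytes);
	return 0;
}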
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6bf30f2af901..aea072413c47 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -228,6 +228,20 @@ enum fuse_req_state {
 	FUSE_REQ_FINISHED
 };
 
+/** The request IO state (for asynchronous processing) */
+struct fuse_io_priv {
+	int async;
+	spinlock_t lock;
+	unsigned reqs;
+	ssize_t bytes;
+	size_t size;
+	__u64 offset;
+	bool write;
+	int err;
+	struct kiocb *iocb;
+	struct file *file;
+};
+
 /**
  * A request to the client
  */
@@ -332,6 +346,9 @@ struct fuse_req {
 	/** Inode used in the request or NULL */
 	struct inode *inode;
 
+	/** AIO control block */
+	struct fuse_io_priv *io;
+
 	/** Link on fi->writepages */
 	struct list_head writepages_entry;
 
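[Editor's note] The commit message also mentions the case where the IO is synchronous from the user's perspective but still processed through this framework, with the caller waiting on the kiocb explicitly. A hedged sketch of that pattern is below; example_sync_wait() is hypothetical, the request filling/submission step is elided, and init_sync_kiocb()/wait_on_sync_kiocb() are the stock in-kernel aio helpers of kernels from this era (they were removed in later kernels).

#include <linux/aio.h>
#include <linux/fs.h>
#include "fuse_i.h"

/* Hypothetical synchronous caller: drive requests through the async
 * framework, then wait on a private, stack-allocated kiocb. */
static ssize_t example_sync_wait(struct fuse_io_priv *io, struct file *file)
{
	struct kiocb iocb;

	init_sync_kiocb(&iocb, file);
	io->iocb = &iocb;

	/*
	 * ... fill and submit one or more requests here with
	 * fuse_async_req_send(fc, req, num_bytes, io) ...
	 */

	/*
	 * When the last request is ACKed, fuse_aio_complete() calls
	 * aio_complete(); for a sync kiocb that stores the result and
	 * wakes this waiter instead of completing a userspace iocb.
	 */
	return wait_on_sync_kiocb(&iocb);
}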