Diffstat (limited to 'fs/fuse/file.c')
-rw-r--r--  fs/fuse/file.c  92
1 file changed, 92 insertions, 0 deletions
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 1ee2fc92bc3e..4002889fbcc1 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -506,6 +506,98 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
 	}
 }
 
+/**
+ * In case of a short read, the caller sets 'pos' to the position of the
+ * actual end of the fuse request within the IO request. Otherwise, if
+ * bytes_requested == bytes_transferred or rw == WRITE, the caller sets
+ * 'pos' to -1.
+ *
+ * An example:
+ * User requested a DIO read of 64K. It was split into two 32K fuse requests,
+ * both submitted asynchronously. The first of them was ACKed by userspace as
+ * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
+ * second request was ACKed as short, e.g. only 1K was read, resulting in
+ * pos == 33K.
+ *
+ * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
+ * will be equal to the length of the longest contiguous fragment of
+ * transferred data starting from the beginning of the IO request.
+ */
+static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+{
+	int left;
+
+	spin_lock(&io->lock);
+	if (err)
+		io->err = io->err ? : err;
+	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
+		io->bytes = pos;
+
+	left = --io->reqs;
+	spin_unlock(&io->lock);
+
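+	/*
+	 * The last request to complete reports the result: an error if any
+	 * request failed, -EIO for a short write, otherwise the number of
+	 * contiguous bytes transferred from the start of the IO request.
+	 */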
+	if (!left) {
+		long res;
+
+		if (io->err)
+			res = io->err;
+		else if (io->bytes >= 0 && io->write)
+			res = -EIO;
+		else {
+			res = io->bytes < 0 ? io->size : io->bytes;
+
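+			/*
+			 * Async completion: bump the inode's attribute version
+			 * under fc->lock so that attributes cached before this
+			 * IO completed are treated as stale.
+			 */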
+			if (!is_sync_kiocb(io->iocb)) {
+				struct path *path = &io->iocb->ki_filp->f_path;
+				struct inode *inode = path->dentry->d_inode;
+				struct fuse_conn *fc = get_fuse_conn(inode);
+				struct fuse_inode *fi = get_fuse_inode(inode);
+
+				spin_lock(&fc->lock);
+				fi->attr_version = ++fc->attr_version;
+				spin_unlock(&fc->lock);
+			}
+		}
+
+		aio_complete(io->iocb, res, 0);
+		kfree(io);
+	}
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+{
+	struct fuse_io_priv *io = req->io;
+	ssize_t pos = -1;
+
+	fuse_release_user_pages(req, !io->write);
+
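+	/*
+	 * A short transfer sets 'pos' to the end of this request's data,
+	 * relative to the start of the whole IO request; a full transfer
+	 * leaves pos == -1 (see fuse_aio_complete() above).
+	 */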
+	if (io->write) {
+		if (req->misc.write.in.size != req->misc.write.out.size)
+			pos = req->misc.write.in.offset - io->offset +
+				req->misc.write.out.size;
+	} else {
+		if (req->misc.read.in.size != req->out.args[0].size)
+			pos = req->misc.read.in.offset - io->offset +
+				req->out.args[0].size;
+	}
+
+	fuse_aio_complete(io, req->out.h.error, pos);
+}
+
+static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+				  size_t num_bytes, struct fuse_io_priv *io)
+{
+	spin_lock(&io->lock);
+	io->size += num_bytes;
+	io->reqs++;
+	spin_unlock(&io->lock);
+
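+	/* Have request completion go through fuse_aio_complete_req(). */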
+	req->io = io;
+	req->end = fuse_aio_complete_req;
+
+	fuse_request_send_background(fc, req);
+
+	return num_bytes;
+}
+
 static size_t fuse_send_read(struct fuse_req *req, struct file *file,
 			     loff_t pos, size_t count, fl_owner_t owner)
 {