aboutsummaryrefslogtreecommitdiffstats
path: root/fs/fuse/file.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-05-07 13:12:32 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-05-07 13:12:32 -0400
commita26ea93a3d19c2b79e8b382356014eba607ce477 (patch)
tree9a37d34ff60121c78123bc8bd4aab2b8b841e36b /fs/fuse/file.c
parentc818c778b0384e5d9e8184ec43b73e05a7ced86f (diff)
parent60b9df7a54804a965850db00beec4d3a2c002536 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
Pull fuse updates from Miklos Szeredi: "This contains two patchsets from Maxim Patlasov. The first reworks the request throttling so that only async requests are throttled. Wakeup of waiting async requests is also optimized. The second series adds support for async processing of direct IO which optimizes direct IO and enables the use of the AIO userspace interface." * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse: fuse: add flag to turn on async direct IO fuse: truncate file if async dio failed fuse: optimize short direct reads fuse: enable asynchronous processing direct IO fuse: make fuse_direct_io() aware about AIO fuse: add support of async IO fuse: move fuse_release_user_pages() up fuse: optimize wake_up fuse: implement exclusive wakeup for blocked_waitq fuse: skip blocking on allocations of synchronous requests fuse: add flag fc->initialized fuse: make request allocations for background processing explicit
Diffstat (limited to 'fs/fuse/file.c')
-rw-r--r--fs/fuse/file.c272
1 files changed, 234 insertions, 38 deletions
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d15c6f21c17f..4655e59d545b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -126,11 +126,13 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
126 struct fuse_req *req = ff->reserved_req; 126 struct fuse_req *req = ff->reserved_req;
127 127
128 if (sync) { 128 if (sync) {
129 req->background = 0;
129 fuse_request_send(ff->fc, req); 130 fuse_request_send(ff->fc, req);
130 path_put(&req->misc.release.path); 131 path_put(&req->misc.release.path);
131 fuse_put_request(ff->fc, req); 132 fuse_put_request(ff->fc, req);
132 } else { 133 } else {
133 req->end = fuse_release_end; 134 req->end = fuse_release_end;
135 req->background = 1;
134 fuse_request_send_background(ff->fc, req); 136 fuse_request_send_background(ff->fc, req);
135 } 137 }
136 kfree(ff); 138 kfree(ff);
@@ -282,6 +284,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
282 WARN_ON(atomic_read(&ff->count) > 1); 284 WARN_ON(atomic_read(&ff->count) > 1);
283 fuse_prepare_release(ff, flags, FUSE_RELEASE); 285 fuse_prepare_release(ff, flags, FUSE_RELEASE);
284 ff->reserved_req->force = 1; 286 ff->reserved_req->force = 1;
287 ff->reserved_req->background = 0;
285 fuse_request_send(ff->fc, ff->reserved_req); 288 fuse_request_send(ff->fc, ff->reserved_req);
286 fuse_put_request(ff->fc, ff->reserved_req); 289 fuse_put_request(ff->fc, ff->reserved_req);
287 kfree(ff); 290 kfree(ff);
@@ -491,9 +494,115 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
491 req->out.args[0].size = count; 494 req->out.args[0].size = count;
492} 495}
493 496
494static size_t fuse_send_read(struct fuse_req *req, struct file *file, 497static void fuse_release_user_pages(struct fuse_req *req, int write)
498{
499 unsigned i;
500
501 for (i = 0; i < req->num_pages; i++) {
502 struct page *page = req->pages[i];
503 if (write)
504 set_page_dirty_lock(page);
505 put_page(page);
506 }
507}
508
509/**
510 * In case of short read, the caller sets 'pos' to the position of
511 * actual end of fuse request in IO request. Otherwise, if bytes_requested
512 * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
513 *
514 * An example:
515 * User requested DIO read of 64K. It was split into two 32K fuse requests,
516 * both submitted asynchronously. The first of them was ACKed by userspace as
517 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
518 * second request was ACKed as short, e.g. only 1K was read, resulting in
519 * pos == 33K.
520 *
521 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
522 * will be equal to the length of the longest contiguous fragment of
523 * transferred data starting from the beginning of IO request.
524 */
525static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
526{
527 int left;
528
529 spin_lock(&io->lock);
530 if (err)
531 io->err = io->err ? : err;
532 else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
533 io->bytes = pos;
534
535 left = --io->reqs;
536 spin_unlock(&io->lock);
537
538 if (!left) {
539 long res;
540
541 if (io->err)
542 res = io->err;
543 else if (io->bytes >= 0 && io->write)
544 res = -EIO;
545 else {
546 res = io->bytes < 0 ? io->size : io->bytes;
547
548 if (!is_sync_kiocb(io->iocb)) {
549 struct path *path = &io->iocb->ki_filp->f_path;
550 struct inode *inode = path->dentry->d_inode;
551 struct fuse_conn *fc = get_fuse_conn(inode);
552 struct fuse_inode *fi = get_fuse_inode(inode);
553
554 spin_lock(&fc->lock);
555 fi->attr_version = ++fc->attr_version;
556 spin_unlock(&fc->lock);
557 }
558 }
559
560 aio_complete(io->iocb, res, 0);
561 kfree(io);
562 }
563}
564
565static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
566{
567 struct fuse_io_priv *io = req->io;
568 ssize_t pos = -1;
569
570 fuse_release_user_pages(req, !io->write);
571
572 if (io->write) {
573 if (req->misc.write.in.size != req->misc.write.out.size)
574 pos = req->misc.write.in.offset - io->offset +
575 req->misc.write.out.size;
576 } else {
577 if (req->misc.read.in.size != req->out.args[0].size)
578 pos = req->misc.read.in.offset - io->offset +
579 req->out.args[0].size;
580 }
581
582 fuse_aio_complete(io, req->out.h.error, pos);
583}
584
585static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
586 size_t num_bytes, struct fuse_io_priv *io)
587{
588 spin_lock(&io->lock);
589 io->size += num_bytes;
590 io->reqs++;
591 spin_unlock(&io->lock);
592
593 req->io = io;
594 req->end = fuse_aio_complete_req;
595
596 __fuse_get_request(req);
597 fuse_request_send_background(fc, req);
598
599 return num_bytes;
600}
601
602static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
495 loff_t pos, size_t count, fl_owner_t owner) 603 loff_t pos, size_t count, fl_owner_t owner)
496{ 604{
605 struct file *file = io->file;
497 struct fuse_file *ff = file->private_data; 606 struct fuse_file *ff = file->private_data;
498 struct fuse_conn *fc = ff->fc; 607 struct fuse_conn *fc = ff->fc;
499 608
@@ -504,6 +613,10 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
504 inarg->read_flags |= FUSE_READ_LOCKOWNER; 613 inarg->read_flags |= FUSE_READ_LOCKOWNER;
505 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 614 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
506 } 615 }
616
617 if (io->async)
618 return fuse_async_req_send(fc, req, count, io);
619
507 fuse_request_send(fc, req); 620 fuse_request_send(fc, req);
508 return req->out.args[0].size; 621 return req->out.args[0].size;
509} 622}
@@ -524,6 +637,7 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
524 637
525static int fuse_readpage(struct file *file, struct page *page) 638static int fuse_readpage(struct file *file, struct page *page)
526{ 639{
640 struct fuse_io_priv io = { .async = 0, .file = file };
527 struct inode *inode = page->mapping->host; 641 struct inode *inode = page->mapping->host;
528 struct fuse_conn *fc = get_fuse_conn(inode); 642 struct fuse_conn *fc = get_fuse_conn(inode);
529 struct fuse_req *req; 643 struct fuse_req *req;
@@ -556,7 +670,7 @@ static int fuse_readpage(struct file *file, struct page *page)
556 req->num_pages = 1; 670 req->num_pages = 1;
557 req->pages[0] = page; 671 req->pages[0] = page;
558 req->page_descs[0].length = count; 672 req->page_descs[0].length = count;
559 num_read = fuse_send_read(req, file, pos, count, NULL); 673 num_read = fuse_send_read(req, &io, pos, count, NULL);
560 err = req->out.h.error; 674 err = req->out.h.error;
561 fuse_put_request(fc, req); 675 fuse_put_request(fc, req);
562 676
@@ -661,7 +775,12 @@ static int fuse_readpages_fill(void *_data, struct page *page)
661 int nr_alloc = min_t(unsigned, data->nr_pages, 775 int nr_alloc = min_t(unsigned, data->nr_pages,
662 FUSE_MAX_PAGES_PER_REQ); 776 FUSE_MAX_PAGES_PER_REQ);
663 fuse_send_readpages(req, data->file); 777 fuse_send_readpages(req, data->file);
664 data->req = req = fuse_get_req(fc, nr_alloc); 778 if (fc->async_read)
779 req = fuse_get_req_for_background(fc, nr_alloc);
780 else
781 req = fuse_get_req(fc, nr_alloc);
782
783 data->req = req;
665 if (IS_ERR(req)) { 784 if (IS_ERR(req)) {
666 unlock_page(page); 785 unlock_page(page);
667 return PTR_ERR(req); 786 return PTR_ERR(req);
@@ -696,7 +815,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
696 815
697 data.file = file; 816 data.file = file;
698 data.inode = inode; 817 data.inode = inode;
699 data.req = fuse_get_req(fc, nr_alloc); 818 if (fc->async_read)
819 data.req = fuse_get_req_for_background(fc, nr_alloc);
820 else
821 data.req = fuse_get_req(fc, nr_alloc);
700 data.nr_pages = nr_pages; 822 data.nr_pages = nr_pages;
701 err = PTR_ERR(data.req); 823 err = PTR_ERR(data.req);
702 if (IS_ERR(data.req)) 824 if (IS_ERR(data.req))
@@ -758,9 +880,10 @@ static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
758 req->out.args[0].value = outarg; 880 req->out.args[0].value = outarg;
759} 881}
760 882
761static size_t fuse_send_write(struct fuse_req *req, struct file *file, 883static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
762 loff_t pos, size_t count, fl_owner_t owner) 884 loff_t pos, size_t count, fl_owner_t owner)
763{ 885{
886 struct file *file = io->file;
764 struct fuse_file *ff = file->private_data; 887 struct fuse_file *ff = file->private_data;
765 struct fuse_conn *fc = ff->fc; 888 struct fuse_conn *fc = ff->fc;
766 struct fuse_write_in *inarg = &req->misc.write.in; 889 struct fuse_write_in *inarg = &req->misc.write.in;
@@ -771,6 +894,10 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
771 inarg->write_flags |= FUSE_WRITE_LOCKOWNER; 894 inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
772 inarg->lock_owner = fuse_lock_owner_id(fc, owner); 895 inarg->lock_owner = fuse_lock_owner_id(fc, owner);
773 } 896 }
897
898 if (io->async)
899 return fuse_async_req_send(fc, req, count, io);
900
774 fuse_request_send(fc, req); 901 fuse_request_send(fc, req);
775 return req->misc.write.out.size; 902 return req->misc.write.out.size;
776} 903}
@@ -794,11 +921,12 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
794 size_t res; 921 size_t res;
795 unsigned offset; 922 unsigned offset;
796 unsigned i; 923 unsigned i;
924 struct fuse_io_priv io = { .async = 0, .file = file };
797 925
798 for (i = 0; i < req->num_pages; i++) 926 for (i = 0; i < req->num_pages; i++)
799 fuse_wait_on_page_writeback(inode, req->pages[i]->index); 927 fuse_wait_on_page_writeback(inode, req->pages[i]->index);
800 928
801 res = fuse_send_write(req, file, pos, count, NULL); 929 res = fuse_send_write(req, &io, pos, count, NULL);
802 930
803 offset = req->page_descs[0].offset; 931 offset = req->page_descs[0].offset;
804 count = res; 932 count = res;
@@ -1033,18 +1161,6 @@ out:
1033 return written ? written : err; 1161 return written ? written : err;
1034} 1162}
1035 1163
1036static void fuse_release_user_pages(struct fuse_req *req, int write)
1037{
1038 unsigned i;
1039
1040 for (i = 0; i < req->num_pages; i++) {
1041 struct page *page = req->pages[i];
1042 if (write)
1043 set_page_dirty_lock(page);
1044 put_page(page);
1045 }
1046}
1047
1048static inline void fuse_page_descs_length_init(struct fuse_req *req, 1164static inline void fuse_page_descs_length_init(struct fuse_req *req,
1049 unsigned index, unsigned nr_pages) 1165 unsigned index, unsigned nr_pages)
1050{ 1166{
@@ -1146,10 +1262,11 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
1146 return min(npages, FUSE_MAX_PAGES_PER_REQ); 1262 return min(npages, FUSE_MAX_PAGES_PER_REQ);
1147} 1263}
1148 1264
1149ssize_t fuse_direct_io(struct file *file, const struct iovec *iov, 1265ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
1150 unsigned long nr_segs, size_t count, loff_t *ppos, 1266 unsigned long nr_segs, size_t count, loff_t *ppos,
1151 int write) 1267 int write)
1152{ 1268{
1269 struct file *file = io->file;
1153 struct fuse_file *ff = file->private_data; 1270 struct fuse_file *ff = file->private_data;
1154 struct fuse_conn *fc = ff->fc; 1271 struct fuse_conn *fc = ff->fc;
1155 size_t nmax = write ? fc->max_write : fc->max_read; 1272 size_t nmax = write ? fc->max_write : fc->max_read;
@@ -1175,11 +1292,12 @@ ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
1175 } 1292 }
1176 1293
1177 if (write) 1294 if (write)
1178 nres = fuse_send_write(req, file, pos, nbytes, owner); 1295 nres = fuse_send_write(req, io, pos, nbytes, owner);
1179 else 1296 else
1180 nres = fuse_send_read(req, file, pos, nbytes, owner); 1297 nres = fuse_send_read(req, io, pos, nbytes, owner);
1181 1298
1182 fuse_release_user_pages(req, !write); 1299 if (!io->async)
1300 fuse_release_user_pages(req, !write);
1183 if (req->out.h.error) { 1301 if (req->out.h.error) {
1184 if (!res) 1302 if (!res)
1185 res = req->out.h.error; 1303 res = req->out.h.error;
@@ -1209,17 +1327,19 @@ ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
1209} 1327}
1210EXPORT_SYMBOL_GPL(fuse_direct_io); 1328EXPORT_SYMBOL_GPL(fuse_direct_io);
1211 1329
1212static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov, 1330static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
1213 unsigned long nr_segs, loff_t *ppos) 1331 const struct iovec *iov,
1332 unsigned long nr_segs, loff_t *ppos,
1333 size_t count)
1214{ 1334{
1215 ssize_t res; 1335 ssize_t res;
1336 struct file *file = io->file;
1216 struct inode *inode = file_inode(file); 1337 struct inode *inode = file_inode(file);
1217 1338
1218 if (is_bad_inode(inode)) 1339 if (is_bad_inode(inode))
1219 return -EIO; 1340 return -EIO;
1220 1341
1221 res = fuse_direct_io(file, iov, nr_segs, iov_length(iov, nr_segs), 1342 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
1222 ppos, 0);
1223 1343
1224 fuse_invalidate_attr(inode); 1344 fuse_invalidate_attr(inode);
1225 1345
@@ -1229,23 +1349,23 @@ static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov,
1229static ssize_t fuse_direct_read(struct file *file, char __user *buf, 1349static ssize_t fuse_direct_read(struct file *file, char __user *buf,
1230 size_t count, loff_t *ppos) 1350 size_t count, loff_t *ppos)
1231{ 1351{
1352 struct fuse_io_priv io = { .async = 0, .file = file };
1232 struct iovec iov = { .iov_base = buf, .iov_len = count }; 1353 struct iovec iov = { .iov_base = buf, .iov_len = count };
1233 return __fuse_direct_read(file, &iov, 1, ppos); 1354 return __fuse_direct_read(&io, &iov, 1, ppos, count);
1234} 1355}
1235 1356
1236static ssize_t __fuse_direct_write(struct file *file, const struct iovec *iov, 1357static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
1358 const struct iovec *iov,
1237 unsigned long nr_segs, loff_t *ppos) 1359 unsigned long nr_segs, loff_t *ppos)
1238{ 1360{
1361 struct file *file = io->file;
1239 struct inode *inode = file_inode(file); 1362 struct inode *inode = file_inode(file);
1240 size_t count = iov_length(iov, nr_segs); 1363 size_t count = iov_length(iov, nr_segs);
1241 ssize_t res; 1364 ssize_t res;
1242 1365
1243 res = generic_write_checks(file, ppos, &count, 0); 1366 res = generic_write_checks(file, ppos, &count, 0);
1244 if (!res) { 1367 if (!res)
1245 res = fuse_direct_io(file, iov, nr_segs, count, ppos, 1); 1368 res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
1246 if (res > 0)
1247 fuse_write_update_size(inode, *ppos);
1248 }
1249 1369
1250 fuse_invalidate_attr(inode); 1370 fuse_invalidate_attr(inode);
1251 1371
@@ -1258,13 +1378,16 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1258 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count }; 1378 struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
1259 struct inode *inode = file_inode(file); 1379 struct inode *inode = file_inode(file);
1260 ssize_t res; 1380 ssize_t res;
1381 struct fuse_io_priv io = { .async = 0, .file = file };
1261 1382
1262 if (is_bad_inode(inode)) 1383 if (is_bad_inode(inode))
1263 return -EIO; 1384 return -EIO;
1264 1385
1265 /* Don't allow parallel writes to the same file */ 1386 /* Don't allow parallel writes to the same file */
1266 mutex_lock(&inode->i_mutex); 1387 mutex_lock(&inode->i_mutex);
1267 res = __fuse_direct_write(file, &iov, 1, ppos); 1388 res = __fuse_direct_write(&io, &iov, 1, ppos);
1389 if (res > 0)
1390 fuse_write_update_size(inode, *ppos);
1268 mutex_unlock(&inode->i_mutex); 1391 mutex_unlock(&inode->i_mutex);
1269 1392
1270 return res; 1393 return res;
@@ -1373,6 +1496,7 @@ static int fuse_writepage_locked(struct page *page)
1373 if (!req) 1496 if (!req)
1374 goto err; 1497 goto err;
1375 1498
1499 req->background = 1; /* writeback always goes to bg_queue */
1376 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 1500 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1377 if (!tmp_page) 1501 if (!tmp_page)
1378 goto err_free; 1502 goto err_free;
@@ -2226,21 +2350,93 @@ int fuse_notify_poll_wakeup(struct fuse_conn *fc,
2226 return 0; 2350 return 0;
2227} 2351}
2228 2352
2353static void fuse_do_truncate(struct file *file)
2354{
2355 struct inode *inode = file->f_mapping->host;
2356 struct iattr attr;
2357
2358 attr.ia_valid = ATTR_SIZE;
2359 attr.ia_size = i_size_read(inode);
2360
2361 attr.ia_file = file;
2362 attr.ia_valid |= ATTR_FILE;
2363
2364 fuse_do_setattr(inode, &attr, file);
2365}
2366
2229static ssize_t 2367static ssize_t
2230fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, 2368fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2231 loff_t offset, unsigned long nr_segs) 2369 loff_t offset, unsigned long nr_segs)
2232{ 2370{
2233 ssize_t ret = 0; 2371 ssize_t ret = 0;
2234 struct file *file = NULL; 2372 struct file *file = iocb->ki_filp;
2373 struct fuse_file *ff = file->private_data;
2235 loff_t pos = 0; 2374 loff_t pos = 0;
2375 struct inode *inode;
2376 loff_t i_size;
2377 size_t count = iov_length(iov, nr_segs);
2378 struct fuse_io_priv *io;
2236 2379
2237 file = iocb->ki_filp;
2238 pos = offset; 2380 pos = offset;
2381 inode = file->f_mapping->host;
2382 i_size = i_size_read(inode);
2383
2384 /* optimization for short read */
2385 if (rw != WRITE && offset + count > i_size) {
2386 if (offset >= i_size)
2387 return 0;
2388 count = i_size - offset;
2389 }
2390
2391 io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
2392 if (!io)
2393 return -ENOMEM;
2394 spin_lock_init(&io->lock);
2395 io->reqs = 1;
2396 io->bytes = -1;
2397 io->size = 0;
2398 io->offset = offset;
2399 io->write = (rw == WRITE);
2400 io->err = 0;
2401 io->file = file;
2402 /*
2403 * By default, we want to optimize all I/Os with async request
2404 * submission to the client filesystem if supported.
2405 */
2406 io->async = ff->fc->async_dio;
2407 io->iocb = iocb;
2408
2409 /*
2410 * We cannot asynchronously extend the size of a file. We have no method
2411 * to wait on real async I/O requests, so we must submit this request
2412 * synchronously.
2413 */
2414 if (!is_sync_kiocb(iocb) && (offset + count > i_size))
2415 io->async = false;
2239 2416
2240 if (rw == WRITE) 2417 if (rw == WRITE)
2241 ret = __fuse_direct_write(file, iov, nr_segs, &pos); 2418 ret = __fuse_direct_write(io, iov, nr_segs, &pos);
2242 else 2419 else
2243 ret = __fuse_direct_read(file, iov, nr_segs, &pos); 2420 ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
2421
2422 if (io->async) {
2423 fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
2424
2425 /* we have a non-extending, async request, so return */
2426 if (ret > 0 && !is_sync_kiocb(iocb))
2427 return -EIOCBQUEUED;
2428
2429 ret = wait_on_sync_kiocb(iocb);
2430 } else {
2431 kfree(io);
2432 }
2433
2434 if (rw == WRITE) {
2435 if (ret > 0)
2436 fuse_write_update_size(inode, pos);
2437 else if (ret < 0 && offset + count > i_size)
2438 fuse_do_truncate(file);
2439 }
2244 2440
2245 return ret; 2441 return ret;
2246} 2442}