 fs/fuse/cuse.c            |  10
 fs/fuse/dev.c             |  80
 fs/fuse/dir.c             |  17
 fs/fuse/file.c            | 272
 fs/fuse/fuse_i.h          |  36
 fs/fuse/inode.c           |  12
 include/uapi/linux/fuse.h |   7
 7 files changed, 362 insertions(+), 72 deletions(-)

diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 6f96a8def147..b3aaf7b3578b 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -92,8 +92,9 @@ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
 {
 	loff_t pos = 0;
 	struct iovec iov = { .iov_base = buf, .iov_len = count };
+	struct fuse_io_priv io = { .async = 0, .file = file };
 
-	return fuse_direct_io(file, &iov, 1, count, &pos, 0);
+	return fuse_direct_io(&io, &iov, 1, count, &pos, 0);
 }
 
 static ssize_t cuse_write(struct file *file, const char __user *buf,
@@ -101,12 +102,13 @@ static ssize_t cuse_write(struct file *file, const char __user *buf,
 {
 	loff_t pos = 0;
 	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+	struct fuse_io_priv io = { .async = 0, .file = file };
 
 	/*
 	 * No locking or generic_write_checks(), the server is
 	 * responsible for locking and sanity checks.
 	 */
-	return fuse_direct_io(file, &iov, 1, count, &pos, 1);
+	return fuse_direct_io(&io, &iov, 1, count, &pos, 1);
 }
 
 static int cuse_open(struct inode *inode, struct file *file)
@@ -422,7 +424,7 @@ static int cuse_send_init(struct cuse_conn *cc)
 
 	BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
 
-	req = fuse_get_req(fc, 1);
+	req = fuse_get_req_for_background(fc, 1);
 	if (IS_ERR(req)) {
 		rc = PTR_ERR(req);
 		goto err;
@@ -504,7 +506,7 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 	cc->fc.release = cuse_fc_release;
 
 	cc->fc.connected = 1;
-	cc->fc.blocked = 0;
+	cc->fc.initialized = 1;
 	rc = cuse_send_init(cc);
 	if (rc) {
 		fuse_conn_put(&cc->fc);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 9bfd1a3214e6..a6c1664e330b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -111,7 +111,7 @@ static void restore_sigs(sigset_t *oldset)
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
-static void __fuse_get_request(struct fuse_req *req)
+void __fuse_get_request(struct fuse_req *req)
 {
 	atomic_inc(&req->count);
 }
@@ -130,20 +130,30 @@ static void fuse_req_init_context(struct fuse_req *req)
 	req->in.h.pid = current->pid;
 }
 
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
+{
+	return !fc->initialized || (for_background && fc->blocked);
+}
+
+static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
+				       bool for_background)
 {
 	struct fuse_req *req;
-	sigset_t oldset;
-	int intr;
 	int err;
-
 	atomic_inc(&fc->num_waiting);
-	block_sigs(&oldset);
-	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
-	restore_sigs(&oldset);
-	err = -EINTR;
-	if (intr)
-		goto out;
+
+	if (fuse_block_alloc(fc, for_background)) {
+		sigset_t oldset;
+		int intr;
+
+		block_sigs(&oldset);
+		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
+				!fuse_block_alloc(fc, for_background));
+		restore_sigs(&oldset);
+		err = -EINTR;
+		if (intr)
+			goto out;
+	}
 
 	err = -ENOTCONN;
 	if (!fc->connected)
@@ -151,19 +161,35 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
 
 	req = fuse_request_alloc(npages);
 	err = -ENOMEM;
-	if (!req)
+	if (!req) {
+		if (for_background)
+			wake_up(&fc->blocked_waitq);
 		goto out;
+	}
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = for_background;
 	return req;
 
  out:
 	atomic_dec(&fc->num_waiting);
 	return ERR_PTR(err);
 }
+
+struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+{
+	return __fuse_get_req(fc, npages, false);
+}
 EXPORT_SYMBOL_GPL(fuse_get_req);
 
+struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
+					     unsigned npages)
+{
+	return __fuse_get_req(fc, npages, true);
+}
+EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
+
 /*
  * Return request in fuse_file->reserved_req. However that may
  * currently be in use. If that is the case, wait for it to become
@@ -225,19 +251,31 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	struct fuse_req *req;
 
 	atomic_inc(&fc->num_waiting);
-	wait_event(fc->blocked_waitq, !fc->blocked);
+	wait_event(fc->blocked_waitq, fc->initialized);
 	req = fuse_request_alloc(0);
 	if (!req)
 		req = get_reserved_req(fc, file);
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = 0;
 	return req;
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
+		if (unlikely(req->background)) {
+			/*
+			 * We get here in the unlikely case that a background
+			 * request was allocated but not sent
+			 */
+			spin_lock(&fc->lock);
+			if (!fc->blocked)
+				wake_up(&fc->blocked_waitq);
+			spin_unlock(&fc->lock);
+		}
+
 		if (req->waiting)
 			atomic_dec(&fc->num_waiting);
 
@@ -335,10 +373,15 @@ __releases(fc->lock)
 	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
-		if (fc->num_background == fc->max_background) {
+		req->background = 0;
+
+		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
-			wake_up_all(&fc->blocked_waitq);
-		}
+
+		/* Wake up next waiter, if any */
+		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
+			wake_up(&fc->blocked_waitq);
+
 		if (fc->num_background == fc->congestion_threshold &&
 		    fc->connected && fc->bdi_initialized) {
 			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
@@ -442,6 +485,7 @@ __acquires(fc->lock)
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
+	BUG_ON(req->background);
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
@@ -469,7 +513,7 @@ EXPORT_SYMBOL_GPL(fuse_request_send);
 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 					    struct fuse_req *req)
 {
-	req->background = 1;
+	BUG_ON(!req->background);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -2071,6 +2115,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	if (fc->connected) {
 		fc->connected = 0;
 		fc->blocked = 0;
+		fc->initialized = 1;
 		end_io_requests(fc);
 		end_queued_requests(fc);
 		end_polls(fc);
@@ -2089,6 +2134,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 		spin_lock(&fc->lock);
 		fc->connected = 0;
 		fc->blocked = 0;
+		fc->initialized = 1;
 		end_queued_requests(fc);
 		end_polls(fc);
 		wake_up_all(&fc->blocked_waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ff15522481d4..254df56b847b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1562,10 +1562,9 @@ void fuse_release_nowrite(struct inode *inode)
  * vmtruncate() doesn't allow for this case, so do the rlimit checking
  * and the actual truncation by hand.
  */
-static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
-			   struct file *file)
+int fuse_do_setattr(struct inode *inode, struct iattr *attr,
+		    struct file *file)
 {
-	struct inode *inode = entry->d_inode;
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_req *req;
 	struct fuse_setattr_in inarg;
@@ -1574,9 +1573,6 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
 	loff_t oldsize;
 	int err;
 
-	if (!fuse_allow_current_process(fc))
-		return -EACCES;
-
 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
 		attr->ia_valid |= ATTR_FORCE;
 
@@ -1671,10 +1667,15 @@ error:
 
 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
 {
+	struct inode *inode = entry->d_inode;
+
+	if (!fuse_allow_current_process(get_fuse_conn(inode)))
+		return -EACCES;
+
 	if (attr->ia_valid & ATTR_FILE)
-		return fuse_do_setattr(entry, attr, attr->ia_file);
+		return fuse_do_setattr(inode, attr, attr->ia_file);
 	else
-		return fuse_do_setattr(entry, attr, NULL);
+		return fuse_do_setattr(inode, attr, NULL);
 }
 
 static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d15c6f21c17f..4655e59d545b 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -126,11 +126,13 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
 		struct fuse_req *req = ff->reserved_req;
 
 		if (sync) {
+			req->background = 0;
 			fuse_request_send(ff->fc, req);
 			path_put(&req->misc.release.path);
 			fuse_put_request(ff->fc, req);
 		} else {
 			req->end = fuse_release_end;
+			req->background = 1;
 			fuse_request_send_background(ff->fc, req);
 		}
 		kfree(ff);
@@ -282,6 +284,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
 	WARN_ON(atomic_read(&ff->count) > 1);
 	fuse_prepare_release(ff, flags, FUSE_RELEASE);
 	ff->reserved_req->force = 1;
+	ff->reserved_req->background = 0;
 	fuse_request_send(ff->fc, ff->reserved_req);
 	fuse_put_request(ff->fc, ff->reserved_req);
 	kfree(ff);
@@ -491,9 +494,115 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
 	req->out.args[0].size = count;
 }
 
-static size_t fuse_send_read(struct fuse_req *req, struct file *file,
+static void fuse_release_user_pages(struct fuse_req *req, int write)
+{
+	unsigned i;
+
+	for (i = 0; i < req->num_pages; i++) {
+		struct page *page = req->pages[i];
+		if (write)
+			set_page_dirty_lock(page);
+		put_page(page);
+	}
+}
+
+/**
+ * In case of short read, the caller sets 'pos' to the position of
+ * actual end of fuse request in IO request. Otherwise, if bytes_requested
+ * == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
+ *
+ * An example:
+ * User requested DIO read of 64K. It was splitted into two 32K fuse requests,
+ * both submitted asynchronously. The first of them was ACKed by userspace as
+ * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
+ * second request was ACKed as short, e.g. only 1K was read, resulting in
+ * pos == 33K.
+ *
+ * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
+ * will be equal to the length of the longest contiguous fragment of
+ * transferred data starting from the beginning of IO request.
+ */
+static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+{
+	int left;
+
+	spin_lock(&io->lock);
+	if (err)
+		io->err = io->err ? : err;
+	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
+		io->bytes = pos;
+
+	left = --io->reqs;
+	spin_unlock(&io->lock);
+
+	if (!left) {
+		long res;
+
+		if (io->err)
+			res = io->err;
+		else if (io->bytes >= 0 && io->write)
+			res = -EIO;
+		else {
+			res = io->bytes < 0 ? io->size : io->bytes;
+
+			if (!is_sync_kiocb(io->iocb)) {
+				struct path *path = &io->iocb->ki_filp->f_path;
+				struct inode *inode = path->dentry->d_inode;
+				struct fuse_conn *fc = get_fuse_conn(inode);
+				struct fuse_inode *fi = get_fuse_inode(inode);
+
+				spin_lock(&fc->lock);
+				fi->attr_version = ++fc->attr_version;
+				spin_unlock(&fc->lock);
+			}
+		}
+
+		aio_complete(io->iocb, res, 0);
+		kfree(io);
+	}
+}
+
+static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+{
+	struct fuse_io_priv *io = req->io;
+	ssize_t pos = -1;
+
+	fuse_release_user_pages(req, !io->write);
+
+	if (io->write) {
+		if (req->misc.write.in.size != req->misc.write.out.size)
+			pos = req->misc.write.in.offset - io->offset +
+				req->misc.write.out.size;
+	} else {
+		if (req->misc.read.in.size != req->out.args[0].size)
+			pos = req->misc.read.in.offset - io->offset +
+				req->out.args[0].size;
+	}
+
+	fuse_aio_complete(io, req->out.h.error, pos);
+}
+
+static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+		size_t num_bytes, struct fuse_io_priv *io)
+{
+	spin_lock(&io->lock);
+	io->size += num_bytes;
+	io->reqs++;
+	spin_unlock(&io->lock);
+
+	req->io = io;
+	req->end = fuse_aio_complete_req;
+
+	__fuse_get_request(req);
+	fuse_request_send_background(fc, req);
+
+	return num_bytes;
+}
+
+static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
 			     loff_t pos, size_t count, fl_owner_t owner)
 {
+	struct file *file = io->file;
 	struct fuse_file *ff = file->private_data;
 	struct fuse_conn *fc = ff->fc;
 
@@ -504,6 +613,10 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
 		inarg->read_flags |= FUSE_READ_LOCKOWNER;
 		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
 	}
+
+	if (io->async)
+		return fuse_async_req_send(fc, req, count, io);
+
 	fuse_request_send(fc, req);
 	return req->out.args[0].size;
 }
@@ -524,6 +637,7 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
 
 static int fuse_readpage(struct file *file, struct page *page)
 {
+	struct fuse_io_priv io = { .async = 0, .file = file };
 	struct inode *inode = page->mapping->host;
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_req *req;
@@ -556,7 +670,7 @@ static int fuse_readpage(struct file *file, struct page *page)
 	req->num_pages = 1;
 	req->pages[0] = page;
 	req->page_descs[0].length = count;
-	num_read = fuse_send_read(req, file, pos, count, NULL);
+	num_read = fuse_send_read(req, &io, pos, count, NULL);
 	err = req->out.h.error;
 	fuse_put_request(fc, req);
 
@@ -661,7 +775,12 @@ static int fuse_readpages_fill(void *_data, struct page *page)
 		int nr_alloc = min_t(unsigned, data->nr_pages,
 				     FUSE_MAX_PAGES_PER_REQ);
 		fuse_send_readpages(req, data->file);
-		data->req = req = fuse_get_req(fc, nr_alloc);
+		if (fc->async_read)
+			req = fuse_get_req_for_background(fc, nr_alloc);
+		else
+			req = fuse_get_req(fc, nr_alloc);
+
+		data->req = req;
 		if (IS_ERR(req)) {
 			unlock_page(page);
 			return PTR_ERR(req);
@@ -696,7 +815,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
 
 	data.file = file;
 	data.inode = inode;
-	data.req = fuse_get_req(fc, nr_alloc);
+	if (fc->async_read)
+		data.req = fuse_get_req_for_background(fc, nr_alloc);
+	else
+		data.req = fuse_get_req(fc, nr_alloc);
 	data.nr_pages = nr_pages;
 	err = PTR_ERR(data.req);
 	if (IS_ERR(data.req))
@@ -758,9 +880,10 @@ static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
 	req->out.args[0].value = outarg;
 }
 
-static size_t fuse_send_write(struct fuse_req *req, struct file *file,
+static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
 			      loff_t pos, size_t count, fl_owner_t owner)
 {
+	struct file *file = io->file;
 	struct fuse_file *ff = file->private_data;
 	struct fuse_conn *fc = ff->fc;
 	struct fuse_write_in *inarg = &req->misc.write.in;
@@ -771,6 +894,10 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
 		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
 		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
 	}
+
+	if (io->async)
+		return fuse_async_req_send(fc, req, count, io);
+
 	fuse_request_send(fc, req);
 	return req->misc.write.out.size;
 }
@@ -794,11 +921,12 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
 	size_t res;
 	unsigned offset;
 	unsigned i;
+	struct fuse_io_priv io = { .async = 0, .file = file };
 
 	for (i = 0; i < req->num_pages; i++)
 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
 
-	res = fuse_send_write(req, file, pos, count, NULL);
+	res = fuse_send_write(req, &io, pos, count, NULL);
 
 	offset = req->page_descs[0].offset;
 	count = res;
@@ -1033,18 +1161,6 @@ out:
 	return written ? written : err;
 }
 
-static void fuse_release_user_pages(struct fuse_req *req, int write)
-{
-	unsigned i;
-
-	for (i = 0; i < req->num_pages; i++) {
-		struct page *page = req->pages[i];
-		if (write)
-			set_page_dirty_lock(page);
-		put_page(page);
-	}
-}
-
 static inline void fuse_page_descs_length_init(struct fuse_req *req,
 					       unsigned index, unsigned nr_pages)
 {
@@ -1146,10 +1262,11 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
 	return min(npages, FUSE_MAX_PAGES_PER_REQ);
 }
 
-ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
+ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 		       unsigned long nr_segs, size_t count, loff_t *ppos,
 		       int write)
 {
+	struct file *file = io->file;
 	struct fuse_file *ff = file->private_data;
 	struct fuse_conn *fc = ff->fc;
 	size_t nmax = write ? fc->max_write : fc->max_read;
@@ -1175,11 +1292,12 @@ ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
 		}
 
 		if (write)
-			nres = fuse_send_write(req, file, pos, nbytes, owner);
+			nres = fuse_send_write(req, io, pos, nbytes, owner);
 		else
-			nres = fuse_send_read(req, file, pos, nbytes, owner);
+			nres = fuse_send_read(req, io, pos, nbytes, owner);
 
-		fuse_release_user_pages(req, !write);
+		if (!io->async)
+			fuse_release_user_pages(req, !write);
 		if (req->out.h.error) {
 			if (!res)
 				res = req->out.h.error;
@@ -1209,17 +1327,19 @@ ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
-static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov,
-				  unsigned long nr_segs, loff_t *ppos)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
+				  const struct iovec *iov,
+				  unsigned long nr_segs, loff_t *ppos,
+				  size_t count)
 {
 	ssize_t res;
+	struct file *file = io->file;
 	struct inode *inode = file_inode(file);
 
 	if (is_bad_inode(inode))
 		return -EIO;
 
-	res = fuse_direct_io(file, iov, nr_segs, iov_length(iov, nr_segs),
-			     ppos, 0);
+	res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
 
 	fuse_invalidate_attr(inode);
 
@@ -1229,23 +1349,23 @@ static ssize_t __fuse_direct_read(struct file *file, const struct iovec *iov,
 static ssize_t fuse_direct_read(struct file *file, char __user *buf,
 				     size_t count, loff_t *ppos)
 {
+	struct fuse_io_priv io = { .async = 0, .file = file };
 	struct iovec iov = { .iov_base = buf, .iov_len = count };
-	return __fuse_direct_read(file, &iov, 1, ppos);
+	return __fuse_direct_read(&io, &iov, 1, ppos, count);
 }
 
-static ssize_t __fuse_direct_write(struct file *file, const struct iovec *iov,
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
+				   const struct iovec *iov,
 				   unsigned long nr_segs, loff_t *ppos)
 {
+	struct file *file = io->file;
 	struct inode *inode = file_inode(file);
 	size_t count = iov_length(iov, nr_segs);
 	ssize_t res;
 
 	res = generic_write_checks(file, ppos, &count, 0);
-	if (!res) {
-		res = fuse_direct_io(file, iov, nr_segs, count, ppos, 1);
-		if (res > 0)
-			fuse_write_update_size(inode, *ppos);
-	}
+	if (!res)
+		res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
 
 	fuse_invalidate_attr(inode);
 
@@ -1258,13 +1378,16 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
 	struct inode *inode = file_inode(file);
 	ssize_t res;
+	struct fuse_io_priv io = { .async = 0, .file = file };
 
 	if (is_bad_inode(inode))
 		return -EIO;
 
 	/* Don't allow parallel writes to the same file */
 	mutex_lock(&inode->i_mutex);
-	res = __fuse_direct_write(file, &iov, 1, ppos);
+	res = __fuse_direct_write(&io, &iov, 1, ppos);
+	if (res > 0)
+		fuse_write_update_size(inode, *ppos);
 	mutex_unlock(&inode->i_mutex);
 
 	return res;
@@ -1373,6 +1496,7 @@ static int fuse_writepage_locked(struct page *page)
 	if (!req)
 		goto err;
 
+	req->background = 1; /* writeback always goes to bg_queue */
 	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 	if (!tmp_page)
 		goto err_free;
@@ -2226,21 +2350,93 @@ int fuse_notify_poll_wakeup(struct fuse_conn *fc,
 	return 0;
 }
 
+static void fuse_do_truncate(struct file *file)
+{
+	struct inode *inode = file->f_mapping->host;
+	struct iattr attr;
+
+	attr.ia_valid = ATTR_SIZE;
+	attr.ia_size = i_size_read(inode);
+
+	attr.ia_file = file;
+	attr.ia_valid |= ATTR_FILE;
+
+	fuse_do_setattr(inode, &attr, file);
+}
+
 static ssize_t
 fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 			loff_t offset, unsigned long nr_segs)
 {
 	ssize_t ret = 0;
-	struct file *file = NULL;
+	struct file *file = iocb->ki_filp;
+	struct fuse_file *ff = file->private_data;
 	loff_t pos = 0;
+	struct inode *inode;
+	loff_t i_size;
+	size_t count = iov_length(iov, nr_segs);
+	struct fuse_io_priv *io;
 
-	file = iocb->ki_filp;
 	pos = offset;
+	inode = file->f_mapping->host;
+	i_size = i_size_read(inode);
+
+	/* optimization for short read */
+	if (rw != WRITE && offset + count > i_size) {
+		if (offset >= i_size)
+			return 0;
+		count = i_size - offset;
+	}
+
+	io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
+	if (!io)
+		return -ENOMEM;
+	spin_lock_init(&io->lock);
+	io->reqs = 1;
+	io->bytes = -1;
+	io->size = 0;
+	io->offset = offset;
+	io->write = (rw == WRITE);
+	io->err = 0;
+	io->file = file;
+	/*
+	 * By default, we want to optimize all I/Os with async request
+	 * submission to the client filesystem if supported.
+	 */
+	io->async = ff->fc->async_dio;
+	io->iocb = iocb;
+
+	/*
+	 * We cannot asynchronously extend the size of a file. We have no method
+	 * to wait on real async I/O requests, so we must submit this request
+	 * synchronously.
+	 */
+	if (!is_sync_kiocb(iocb) && (offset + count > i_size))
+		io->async = false;
 
 	if (rw == WRITE)
-		ret = __fuse_direct_write(file, iov, nr_segs, &pos);
+		ret = __fuse_direct_write(io, iov, nr_segs, &pos);
 	else
-		ret = __fuse_direct_read(file, iov, nr_segs, &pos);
+		ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+
+	if (io->async) {
+		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+
+		/* we have a non-extending, async request, so return */
+		if (ret > 0 && !is_sync_kiocb(iocb))
+			return -EIOCBQUEUED;
+
+		ret = wait_on_sync_kiocb(iocb);
+	} else {
+		kfree(io);
+	}
+
+	if (rw == WRITE) {
+		if (ret > 0)
+			fuse_write_update_size(inode, pos);
+		else if (ret < 0 && offset + count > i_size)
+			fuse_do_truncate(file);
+	}
 
 	return ret;
 }
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 6aeba864f070..fde7249a3a96 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -228,6 +228,20 @@ enum fuse_req_state {
 	FUSE_REQ_FINISHED
 };
 
+/** The request IO state (for asynchronous processing) */
+struct fuse_io_priv {
+	int async;
+	spinlock_t lock;
+	unsigned reqs;
+	ssize_t bytes;
+	size_t size;
+	__u64 offset;
+	bool write;
+	int err;
+	struct kiocb *iocb;
+	struct file *file;
+};
+
 /**
  * A request to the client
  */
@@ -332,6 +346,9 @@ struct fuse_req {
 	/** Inode used in the request or NULL */
 	struct inode *inode;
 
+	/** AIO control block */
+	struct fuse_io_priv *io;
+
 	/** Link on fi->writepages */
 	struct list_head writepages_entry;
 
@@ -417,6 +434,10 @@ struct fuse_conn {
 	/** Batching of FORGET requests (positive indicates FORGET batch) */
 	int forget_batch;
 
+	/** Flag indicating that INIT reply has been received. Allocating
+	 * any fuse request will be suspended until the flag is set */
+	int initialized;
+
 	/** Flag indicating if connection is blocked. This will be
 	    the case before the INIT reply is received, and if there
 	    are too many outstading backgrounds requests */
@@ -520,6 +541,9 @@ struct fuse_conn {
 	/** Does the filesystem want adaptive readdirplus? */
 	unsigned readdirplus_auto:1;
 
+	/** Does the filesystem support asynchronous direct-IO submission? */
+	unsigned async_dio:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
@@ -708,6 +732,13 @@ void fuse_request_free(struct fuse_req *req);
  * caller should specify # elements in req->pages[] explicitly
  */
 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages);
+struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
+					     unsigned npages);
+
+/*
+ * Increment reference count on request
+ */
+void __fuse_get_request(struct fuse_req *req);
 
 /**
  * Get a request, may fail with -ENOMEM,
@@ -823,7 +854,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
 
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
 		 bool isdir);
-ssize_t fuse_direct_io(struct file *file, const struct iovec *iov,
+ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 		       unsigned long nr_segs, size_t count, loff_t *ppos,
 		       int write);
 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
@@ -835,4 +866,7 @@ int fuse_dev_release(struct inode *inode, struct file *file);
 
 void fuse_write_update_size(struct inode *inode, loff_t pos);
 
+int fuse_do_setattr(struct inode *inode, struct iattr *attr,
+		    struct file *file);
+
 #endif /* _FS_FUSE_I_H */
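
The new struct fuse_io_priv is used in two ways in this series: synchronous callers build one on the stack with .async = 0, while fuse_direct_IO() allocates one with kmalloc() and sets .async from fc->async_dio so that fuse_send_read()/fuse_send_write() route through fuse_async_req_send(). A minimal sketch of the synchronous pattern, mirroring cuse_read() and fuse_direct_read() above (the function name here is illustrative, not part of the patch):

	static ssize_t example_sync_direct_read(struct file *file, char __user *buf,
						size_t count, loff_t *ppos)
	{
		/* on-stack io context: .async = 0 keeps the old blocking behaviour */
		struct fuse_io_priv io = { .async = 0, .file = file };
		struct iovec iov = { .iov_base = buf, .iov_len = count };

		return fuse_direct_io(&io, &iov, 1, count, ppos, 0);
	}
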
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 137185c3884f..6201f81e4d3a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -346,6 +346,7 @@ static void fuse_send_destroy(struct fuse_conn *fc)
 		fc->destroy_req = NULL;
 		req->in.h.opcode = FUSE_DESTROY;
 		req->force = 1;
+		req->background = 0;
 		fuse_request_send(fc, req);
 		fuse_put_request(fc, req);
 	}
@@ -362,6 +363,7 @@ void fuse_conn_kill(struct fuse_conn *fc)
 	spin_lock(&fc->lock);
 	fc->connected = 0;
 	fc->blocked = 0;
+	fc->initialized = 1;
 	spin_unlock(&fc->lock);
 	/* Flush all readers on this fs */
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -581,7 +583,8 @@ void fuse_conn_init(struct fuse_conn *fc)
 	fc->khctr = 0;
 	fc->polled_files = RB_ROOT;
 	fc->reqctr = 0;
-	fc->blocked = 1;
+	fc->blocked = 0;
+	fc->initialized = 0;
 	fc->attr_version = 1;
 	get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
 }
@@ -868,6 +871,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 			fc->do_readdirplus = 1;
 		if (arg->flags & FUSE_READDIRPLUS_AUTO)
 			fc->readdirplus_auto = 1;
+		if (arg->flags & FUSE_ASYNC_DIO)
+			fc->async_dio = 1;
 	} else {
 		ra_pages = fc->max_read / PAGE_CACHE_SIZE;
 		fc->no_lock = 1;
@@ -880,7 +885,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
 		fc->max_write = max_t(unsigned, 4096, fc->max_write);
 		fc->conn_init = 1;
 	}
-	fc->blocked = 0;
+	fc->initialized = 1;
 	wake_up_all(&fc->blocked_waitq);
 }
 
@@ -895,7 +900,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
 		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
-		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO;
+		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO;
 	req->in.h.opcode = FUSE_INIT;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(*arg);
@@ -1043,6 +1048,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 	init_req = fuse_request_alloc(0);
 	if (!init_req)
 		goto err_put_root;
+	init_req->background = 1;
 
 	if (is_bdev) {
 		fc->destroy_req = fuse_request_alloc(0);
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 706d035fa748..60bb2f9f7b74 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -90,6 +90,9 @@
  * 7.21
  * - add FUSE_READDIRPLUS
  * - send the requested events in POLL request
+ *
+ * 7.22
+ * - add FUSE_ASYNC_DIO
  */
 
 #ifndef _LINUX_FUSE_H
@@ -125,7 +128,7 @@
 #define FUSE_KERNEL_VERSION 7
 
 /** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 21
+#define FUSE_KERNEL_MINOR_VERSION 22
 
 /** The node ID of the root inode */
 #define FUSE_ROOT_ID 1
@@ -215,6 +218,7 @@ struct fuse_file_lock {
 * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages
 * FUSE_DO_READDIRPLUS: do READDIRPLUS (READDIR+LOOKUP in one)
 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
+ * FUSE_ASYNC_DIO: asynchronous direct I/O submission
 */
 #define FUSE_ASYNC_READ		(1 << 0)
 #define FUSE_POSIX_LOCKS	(1 << 1)
@@ -231,6 +235,7 @@ struct fuse_file_lock {
 #define FUSE_AUTO_INVAL_DATA	(1 << 12)
 #define FUSE_DO_READDIRPLUS	(1 << 13)
 #define FUSE_READDIRPLUS_AUTO	(1 << 14)
+#define FUSE_ASYNC_DIO		(1 << 15)
 
 /**
  * CUSE INIT request/reply flags
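
FUSE_ASYNC_DIO follows the usual INIT negotiation: the kernel offers it in fuse_init_in.flags and only sets fc->async_dio if the reply echoes the bit back (see process_init_reply() above). A hedged sketch of the server side, assuming a daemon that fills struct fuse_init_out by hand; the helper name and surrounding plumbing are illustrative, not part of this patch:

	#include <linux/fuse.h>

	/* Illustrative only: echo FUSE_ASYNC_DIO back to the kernel if offered,
	 * opting this server in to asynchronous direct I/O submission. */
	static void fill_init_reply(const struct fuse_init_in *in,
				    struct fuse_init_out *out)
	{
		out->major = FUSE_KERNEL_VERSION;
		out->minor = FUSE_KERNEL_MINOR_VERSION;
		out->flags = 0;
		if (in->flags & FUSE_ASYNC_DIO)
			out->flags |= FUSE_ASYNC_DIO;
	}
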