Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	225
1 file changed, 132 insertions, 93 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e08ab4702d97..4526da8907c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -21,18 +21,18 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
 static kmem_cache_t *fuse_req_cachep;
 
-static inline struct fuse_conn *fuse_get_conn(struct file *file)
+static struct fuse_conn *fuse_get_conn(struct file *file)
 {
 	struct fuse_conn *fc;
 	spin_lock(&fuse_lock);
 	fc = file->private_data;
-	if (fc && !fc->mounted)
+	if (fc && !fc->connected)
 		fc = NULL;
 	spin_unlock(&fuse_lock);
 	return fc;
 }
 
-static inline void fuse_request_init(struct fuse_req *req)
+static void fuse_request_init(struct fuse_req *req)
 {
 	memset(req, 0, sizeof(*req));
 	INIT_LIST_HEAD(&req->list);
@@ -53,7 +53,7 @@ void fuse_request_free(struct fuse_req *req)
 	kmem_cache_free(fuse_req_cachep, req);
 }
 
-static inline void block_sigs(sigset_t *oldset)
+static void block_sigs(sigset_t *oldset)
 {
 	sigset_t mask;
 
@@ -61,7 +61,7 @@ static inline void block_sigs(sigset_t *oldset)
 	sigprocmask(SIG_BLOCK, &mask, oldset);
 }
 
-static inline void restore_sigs(sigset_t *oldset)
+static void restore_sigs(sigset_t *oldset)
 {
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
@@ -109,18 +109,24 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
 	int intr;
 	sigset_t oldset;
 
+	atomic_inc(&fc->num_waiting);
 	block_sigs(&oldset);
 	intr = down_interruptible(&fc->outstanding_sem);
 	restore_sigs(&oldset);
-	return intr ? NULL : do_get_request(fc);
+	if (intr) {
+		atomic_dec(&fc->num_waiting);
+		return NULL;
+	}
+	return do_get_request(fc);
 }
 
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	spin_lock(&fuse_lock);
-	if (req->preallocated)
+	if (req->preallocated) {
+		atomic_dec(&fc->num_waiting);
 		list_add(&req->list, &fc->unused_list);
-	else
+	} else
 		fuse_request_free(req);
 
 	/* If we are in debt decrease that first */
@@ -148,42 +154,23 @@ void fuse_release_background(struct fuse_req *req)
 	spin_unlock(&fuse_lock);
 }
 
-static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
-{
-	int i;
-	struct fuse_init_out *arg = &req->misc.init_out;
-
-	if (arg->major != FUSE_KERNEL_VERSION)
-		fc->conn_error = 1;
-	else {
-		fc->minor = arg->minor;
-		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
-	}
-
-	/* After INIT reply is received other requests can go
-	   out. So do (FUSE_MAX_OUTSTANDING - 1) number of
-	   up()s on outstanding_sem. The last up() is done in
-	   fuse_putback_request() */
-	for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
-		up(&fc->outstanding_sem);
-}
-
 /*
  * This function is called when a request is finished. Either a reply
  * has arrived or it was interrupted (and not yet sent) or some error
- * occurred during communication with userspace, or the device file was
- * closed. It decreases the reference count for the request. In case
- * of a background request the reference to the stored objects are
- * released. The requester thread is woken up (if still waiting), and
- * finally the request is either freed or put on the unused_list
+ * occurred during communication with userspace, or the device file
+ * was closed. In case of a background request the reference to the
+ * stored objects are released. The requester thread is woken up (if
+ * still waiting), the 'end' callback is called if given, else the
+ * reference to the request is released
  *
  * Called with fuse_lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-	int putback;
-	req->finished = 1;
-	putback = atomic_dec_and_test(&req->count);
+	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+	req->end = NULL;
+	list_del(&req->list);
+	req->state = FUSE_REQ_FINISHED;
 	spin_unlock(&fuse_lock);
 	if (req->background) {
 		down_read(&fc->sbput_sem);
@@ -192,18 +179,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 		up_read(&fc->sbput_sem);
 	}
 	wake_up(&req->waitq);
-	if (req->in.h.opcode == FUSE_INIT)
-		process_init_reply(fc, req);
-	else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
-		/* Special case for failed iget in CREATE */
-		u64 nodeid = req->in.h.nodeid;
-		__fuse_get_request(req);
-		fuse_reset_request(req);
-		fuse_send_forget(fc, req, nodeid, 1);
-		putback = 0;
-	}
-	if (putback)
-		fuse_putback_request(fc, req);
+	if (end)
+		end(fc, req);
+	else
+		fuse_put_request(fc, req);
 }
 
 /*
@@ -254,14 +233,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 
 	spin_unlock(&fuse_lock);
 	block_sigs(&oldset);
-	wait_event_interruptible(req->waitq, req->finished);
+	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
 	spin_lock(&fuse_lock);
-	if (req->finished)
+	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
-	req->out.h.error = -EINTR;
-	req->interrupted = 1;
+	if (!req->interrupted) {
+		req->out.h.error = -EINTR;
+		req->interrupted = 1;
+	}
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req. During
@@ -272,10 +253,10 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		wait_event(req->waitq, !req->locked);
 		spin_lock(&fuse_lock);
 	}
-	if (!req->sent && !list_empty(&req->list)) {
+	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
 		__fuse_put_request(req);
-	} else if (!req->finished && req->sent)
+	} else if (req->state == FUSE_REQ_SENT)
 		background_request(fc, req);
 }
 
@@ -310,6 +291,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 		fc->outstanding_debt++;
 	}
 	list_add_tail(&req->list, &fc->pending);
+	req->state = FUSE_REQ_PENDING;
 	wake_up(&fc->waitq);
 }
 
@@ -362,34 +344,12 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 	request_send_nowait(fc, req);
 }
 
-void fuse_send_init(struct fuse_conn *fc)
-{
-	/* This is called from fuse_read_super() so there's guaranteed
-	   to be a request available */
-	struct fuse_req *req = do_get_request(fc);
-	struct fuse_init_in *arg = &req->misc.init_in;
-	arg->major = FUSE_KERNEL_VERSION;
-	arg->minor = FUSE_KERNEL_MINOR_VERSION;
-	req->in.h.opcode = FUSE_INIT;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(*arg);
-	req->in.args[0].value = arg;
-	req->out.numargs = 1;
-	/* Variable length arguement used for backward compatibility
-	   with interface version < 7.5. Rest of init_out is zeroed
-	   by do_get_request(), so a short reply is not a problem */
-	req->out.argvar = 1;
-	req->out.args[0].size = sizeof(struct fuse_init_out);
-	req->out.args[0].value = &req->misc.init_out;
-	request_send_background(fc, req);
-}
-
 /*
  * Lock the request. Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault. If the request was already
  * interrupted bail out.
  */
-static inline int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
@@ -408,7 +368,7 @@ static inline int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static inline void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_req *req)
 {
 	if (req) {
 		spin_lock(&fuse_lock);
@@ -444,7 +404,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write,
 }
 
 /* Unmap and put previous page of userspace buffer */
-static inline void fuse_copy_finish(struct fuse_copy_state *cs)
+static void fuse_copy_finish(struct fuse_copy_state *cs)
 {
 	if (cs->mapaddr) {
 		kunmap_atomic(cs->mapaddr, KM_USER0);
@@ -493,8 +453,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 }
 
 /* Do as much copy to/from userspace buffer as we can */
-static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
-			       unsigned *size)
+static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
 	unsigned ncpy = min(*size, cs->len);
 	if (val) {
@@ -514,8 +473,8 @@ static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
  * Copy a page in the request to/from the userspace buffer. Must be
  * done atomically
  */
-static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
-				 unsigned offset, unsigned count, int zeroing)
+static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
+			  unsigned offset, unsigned count, int zeroing)
 {
 	if (page && zeroing && count < PAGE_SIZE) {
 		void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -597,7 +556,7 @@ static void request_wait(struct fuse_conn *fc)
 	DECLARE_WAITQUEUE(wait, current);
 
 	add_wait_queue_exclusive(&fc->waitq, &wait);
-	while (fc->mounted && list_empty(&fc->pending)) {
+	while (fc->connected && list_empty(&fc->pending)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signal_pending(current))
 			break;
@@ -637,14 +596,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 	request_wait(fc);
 	err = -ENODEV;
-	if (!fc->mounted)
+	if (!fc->connected)
 		goto err_unlock;
 	err = -ERESTARTSYS;
 	if (list_empty(&fc->pending))
 		goto err_unlock;
 
 	req = list_entry(fc->pending.next, struct fuse_req, list);
-	list_del_init(&req->list);
+	req->state = FUSE_REQ_READING;
+	list_move(&req->list, &fc->io);
 
 	in = &req->in;
 	reqsize = in->h.len;
@@ -677,8 +637,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	if (!req->isreply)
 		request_end(fc, req);
 	else {
-		req->sent = 1;
-		list_add_tail(&req->list, &fc->processing);
+		req->state = FUSE_REQ_SENT;
+		list_move_tail(&req->list, &fc->processing);
 		spin_unlock(&fuse_lock);
 	}
 	return reqsize;
@@ -766,17 +726,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_finish;
 
 	spin_lock(&fuse_lock);
+	err = -ENOENT;
+	if (!fc->connected)
+		goto err_unlock;
+
 	req = request_find(fc, oh.unique);
 	err = -EINVAL;
 	if (!req)
 		goto err_unlock;
 
-	list_del_init(&req->list);
 	if (req->interrupted) {
-		request_end(fc, req);
+		spin_unlock(&fuse_lock);
 		fuse_copy_finish(&cs);
+		spin_lock(&fuse_lock);
+		request_end(fc, req);
 		return -ENOENT;
 	}
+	list_move(&req->list, &fc->io);
 	req->out.h = oh;
 	req->locked = 1;
 	cs.req = req;
@@ -830,19 +796,90 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	return mask;
 }
 
-/* Abort all requests on the given list (pending or processing) */
+/*
+ * Abort all requests on the given list (pending or processing)
+ *
+ * This function releases and reacquires fuse_lock
+ */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
 	while (!list_empty(head)) {
 		struct fuse_req *req;
 		req = list_entry(head->next, struct fuse_req, list);
-		list_del_init(&req->list);
 		req->out.h.error = -ECONNABORTED;
 		request_end(fc, req);
 		spin_lock(&fuse_lock);
 	}
 }
 
+/*
+ * Abort requests under I/O
+ *
+ * The requests are set to interrupted and finished, and the request
+ * waiter is woken up. This will make request_wait_answer() wait
+ * until the request is unlocked and then return.
+ *
+ * If the request is asynchronous, then the end function needs to be
+ * called after waiting for the request to be unlocked (if it was
+ * locked).
+ */
+static void end_io_requests(struct fuse_conn *fc)
+{
+	while (!list_empty(&fc->io)) {
+		struct fuse_req *req =
+			list_entry(fc->io.next, struct fuse_req, list);
+		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+
+		req->interrupted = 1;
+		req->out.h.error = -ECONNABORTED;
+		req->state = FUSE_REQ_FINISHED;
+		list_del_init(&req->list);
+		wake_up(&req->waitq);
+		if (end) {
+			req->end = NULL;
+			/* The end function will consume this reference */
+			__fuse_get_request(req);
+			spin_unlock(&fuse_lock);
+			wait_event(req->waitq, !req->locked);
+			end(fc, req);
+			spin_lock(&fuse_lock);
+		}
+	}
+}
+
+/*
+ * Abort all requests.
+ *
+ * Emergency exit in case of a malicious or accidental deadlock, or
+ * just a hung filesystem.
+ *
+ * The same effect is usually achievable through killing the
+ * filesystem daemon and all users of the filesystem. The exception
+ * is the combination of an asynchronous request and the tricky
+ * deadlock (see Documentation/filesystems/fuse.txt).
+ *
+ * During the aborting, progression of requests from the pending and
+ * processing lists onto the io list, and progression of new requests
+ * onto the pending list is prevented by req->connected being false.
+ *
+ * Progression of requests under I/O to the processing list is
+ * prevented by the req->interrupted flag being true for these
+ * requests. For this reason requests on the io list must be aborted
+ * first.
+ */
+void fuse_abort_conn(struct fuse_conn *fc)
+{
+	spin_lock(&fuse_lock);
+	if (fc->connected) {
+		fc->connected = 0;
+		end_io_requests(fc);
+		end_requests(fc, &fc->pending);
+		end_requests(fc, &fc->processing);
+		wake_up_all(&fc->waitq);
+	}
+	spin_unlock(&fuse_lock);
+}
+
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
 	struct fuse_conn *fc;
@@ -853,9 +890,11 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-		fuse_release_conn(fc);
 	}
 	spin_unlock(&fuse_lock);
+	if (fc)
+		kobject_put(&fc->kobj);
+
 	return 0;
 }
 
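Note: the request states used above (FUSE_REQ_PENDING, FUSE_REQ_READING, FUSE_REQ_SENT, FUSE_REQ_FINISHED) are defined in fs/fuse/fuse_i.h, which is not part of this diff. A minimal sketch of what that enum presumably looks like, reconstructed only from the states referenced here (the FUSE_REQ_INIT name for the zero-initialized state is an assumption):

	enum fuse_req_state {
		FUSE_REQ_INIT = 0,	/* assumed: state after fuse_request_init() memsets the request */
		FUSE_REQ_PENDING,	/* queued on fc->pending, waiting to be read by the daemon */
		FUSE_REQ_READING,	/* on fc->io while fuse_dev_readv() copies it to userspace */
		FUSE_REQ_SENT,		/* on fc->processing, waiting for the daemon's reply */
		FUSE_REQ_FINISHED	/* answered, aborted or interrupted */
	};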
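The other mechanism visible in request_end() is the per-request 'end' callback: when req->end is set, it is invoked instead of the final fuse_put_request(), and end_io_requests() takes an extra reference that the callback is expected to consume. A hypothetical caller might look like the sketch below; example_end and example_send_async are illustrative names only, not functions added by this patch.

	/* Hypothetical illustration of the ->end callback contract. */
	static void example_end(struct fuse_conn *fc, struct fuse_req *req)
	{
		/* examine req->out.h.error and the reply arguments here */
		fuse_put_request(fc, req);	/* the callback owns the final reference */
	}

	static void example_send_async(struct fuse_conn *fc, struct fuse_req *req)
	{
		req->end = example_end;		/* called from request_end() or end_io_requests() */
		request_send_background(fc, req);
	}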
