Diffstat (limited to 'fs/fuse')

 -rw-r--r--  fs/fuse/dev.c    | 225
 -rw-r--r--  fs/fuse/dir.c    |  18
 -rw-r--r--  fs/fuse/file.c   |  83
 -rw-r--r--  fs/fuse/fuse_i.h |  98
 -rw-r--r--  fs/fuse/inode.c  | 268

5 files changed, 480 insertions, 212 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e08ab4702d97..4526da8907c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
| @@ -21,18 +21,18 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); | |||
| 21 | 21 | ||
| 22 | static kmem_cache_t *fuse_req_cachep; | 22 | static kmem_cache_t *fuse_req_cachep; |
| 23 | 23 | ||
| 24 | static inline struct fuse_conn *fuse_get_conn(struct file *file) | 24 | static struct fuse_conn *fuse_get_conn(struct file *file) |
| 25 | { | 25 | { |
| 26 | struct fuse_conn *fc; | 26 | struct fuse_conn *fc; |
| 27 | spin_lock(&fuse_lock); | 27 | spin_lock(&fuse_lock); |
| 28 | fc = file->private_data; | 28 | fc = file->private_data; |
| 29 | if (fc && !fc->mounted) | 29 | if (fc && !fc->connected) |
| 30 | fc = NULL; | 30 | fc = NULL; |
| 31 | spin_unlock(&fuse_lock); | 31 | spin_unlock(&fuse_lock); |
| 32 | return fc; | 32 | return fc; |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | static inline void fuse_request_init(struct fuse_req *req) | 35 | static void fuse_request_init(struct fuse_req *req) |
| 36 | { | 36 | { |
| 37 | memset(req, 0, sizeof(*req)); | 37 | memset(req, 0, sizeof(*req)); |
| 38 | INIT_LIST_HEAD(&req->list); | 38 | INIT_LIST_HEAD(&req->list); |
| @@ -53,7 +53,7 @@ void fuse_request_free(struct fuse_req *req) | |||
| 53 | kmem_cache_free(fuse_req_cachep, req); | 53 | kmem_cache_free(fuse_req_cachep, req); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | static inline void block_sigs(sigset_t *oldset) | 56 | static void block_sigs(sigset_t *oldset) |
| 57 | { | 57 | { |
| 58 | sigset_t mask; | 58 | sigset_t mask; |
| 59 | 59 | ||
| @@ -61,7 +61,7 @@ static inline void block_sigs(sigset_t *oldset) | |||
| 61 | sigprocmask(SIG_BLOCK, &mask, oldset); | 61 | sigprocmask(SIG_BLOCK, &mask, oldset); |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static inline void restore_sigs(sigset_t *oldset) | 64 | static void restore_sigs(sigset_t *oldset) |
| 65 | { | 65 | { |
| 66 | sigprocmask(SIG_SETMASK, oldset, NULL); | 66 | sigprocmask(SIG_SETMASK, oldset, NULL); |
| 67 | } | 67 | } |
| @@ -109,18 +109,24 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc) | |||
| 109 | int intr; | 109 | int intr; |
| 110 | sigset_t oldset; | 110 | sigset_t oldset; |
| 111 | 111 | ||
| 112 | atomic_inc(&fc->num_waiting); | ||
| 112 | block_sigs(&oldset); | 113 | block_sigs(&oldset); |
| 113 | intr = down_interruptible(&fc->outstanding_sem); | 114 | intr = down_interruptible(&fc->outstanding_sem); |
| 114 | restore_sigs(&oldset); | 115 | restore_sigs(&oldset); |
| 115 | return intr ? NULL : do_get_request(fc); | 116 | if (intr) { |
| 117 | atomic_dec(&fc->num_waiting); | ||
| 118 | return NULL; | ||
| 119 | } | ||
| 120 | return do_get_request(fc); | ||
| 116 | } | 121 | } |
| 117 | 122 | ||
| 118 | static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) | 123 | static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) |
| 119 | { | 124 | { |
| 120 | spin_lock(&fuse_lock); | 125 | spin_lock(&fuse_lock); |
| 121 | if (req->preallocated) | 126 | if (req->preallocated) { |
| 127 | atomic_dec(&fc->num_waiting); | ||
| 122 | list_add(&req->list, &fc->unused_list); | 128 | list_add(&req->list, &fc->unused_list); |
| 123 | else | 129 | } else |
| 124 | fuse_request_free(req); | 130 | fuse_request_free(req); |
| 125 | 131 | ||
| 126 | /* If we are in debt decrease that first */ | 132 | /* If we are in debt decrease that first */ |
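The atomic counter fc->num_waiting added above (declared in fuse_i.h further down) tracks callers that have asked for a preallocated request and have not yet given it back: it is incremented before the interruptible down() on outstanding_sem, decremented again if the sleep is interrupted, and otherwise decremented when the preallocated request returns to unused_list. The new sysfs "waiting" attribute in inode.c simply reports this value. A stand-alone sketch of the same accounting pattern in plain POSIX C (illustrative only, not kernel code):

    #include <errno.h>
    #include <semaphore.h>
    #include <stdatomic.h>

    static atomic_int num_waiting;

    /* Take one request slot; give up cleanly if a signal interrupts the
     * wait, mirroring the atomic_inc()/down_interruptible()/atomic_dec()
     * pairing in the hunk above. */
    int get_slot(sem_t *slots)
    {
            atomic_fetch_add(&num_waiting, 1);
            if (sem_wait(slots) == -1 && errno == EINTR) {
                    atomic_fetch_sub(&num_waiting, 1);
                    return -1;              /* like returning NULL above */
            }
            return 0;   /* the counter drops when the slot is handed back */
    }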
| @@ -148,42 +154,23 @@ void fuse_release_background(struct fuse_req *req) | |||
| 148 | spin_unlock(&fuse_lock); | 154 | spin_unlock(&fuse_lock); |
| 149 | } | 155 | } |
| 150 | 156 | ||
| 151 | static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | ||
| 152 | { | ||
| 153 | int i; | ||
| 154 | struct fuse_init_out *arg = &req->misc.init_out; | ||
| 155 | |||
| 156 | if (arg->major != FUSE_KERNEL_VERSION) | ||
| 157 | fc->conn_error = 1; | ||
| 158 | else { | ||
| 159 | fc->minor = arg->minor; | ||
| 160 | fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; | ||
| 161 | } | ||
| 162 | |||
| 163 | /* After INIT reply is received other requests can go | ||
| 164 | out. So do (FUSE_MAX_OUTSTANDING - 1) number of | ||
| 165 | up()s on outstanding_sem. The last up() is done in | ||
| 166 | fuse_putback_request() */ | ||
| 167 | for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) | ||
| 168 | up(&fc->outstanding_sem); | ||
| 169 | } | ||
| 170 | |||
| 171 | /* | 157 | /* |
| 172 | * This function is called when a request is finished. Either a reply | 158 | * This function is called when a request is finished. Either a reply |
| 173 | * has arrived or it was interrupted (and not yet sent) or some error | 159 | * has arrived or it was interrupted (and not yet sent) or some error |
| 174 | * occurred during communication with userspace, or the device file was | 160 | * occurred during communication with userspace, or the device file |
| 175 | * closed. It decreases the reference count for the request. In case | 161 | * was closed. In case of a background request the reference to the |
| 176 | * of a background request the reference to the stored objects are | 162 | * stored objects are released. The requester thread is woken up (if |
| 177 | * released. The requester thread is woken up (if still waiting), and | 163 | * still waiting), the 'end' callback is called if given, else the |
| 178 | * finally the request is either freed or put on the unused_list | 164 | * reference to the request is released |
| 179 | * | 165 | * |
| 180 | * Called with fuse_lock, unlocks it | 166 | * Called with fuse_lock, unlocks it |
| 181 | */ | 167 | */ |
| 182 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) | 168 | static void request_end(struct fuse_conn *fc, struct fuse_req *req) |
| 183 | { | 169 | { |
| 184 | int putback; | 170 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; |
| 185 | req->finished = 1; | 171 | req->end = NULL; |
| 186 | putback = atomic_dec_and_test(&req->count); | 172 | list_del(&req->list); |
| 173 | req->state = FUSE_REQ_FINISHED; | ||
| 187 | spin_unlock(&fuse_lock); | 174 | spin_unlock(&fuse_lock); |
| 188 | if (req->background) { | 175 | if (req->background) { |
| 189 | down_read(&fc->sbput_sem); | 176 | down_read(&fc->sbput_sem); |
| @@ -192,18 +179,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req) | |||
| 192 | up_read(&fc->sbput_sem); | 179 | up_read(&fc->sbput_sem); |
| 193 | } | 180 | } |
| 194 | wake_up(&req->waitq); | 181 | wake_up(&req->waitq); |
| 195 | if (req->in.h.opcode == FUSE_INIT) | 182 | if (end) |
| 196 | process_init_reply(fc, req); | 183 | end(fc, req); |
| 197 | else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) { | 184 | else |
| 198 | /* Special case for failed iget in CREATE */ | 185 | fuse_put_request(fc, req); |
| 199 | u64 nodeid = req->in.h.nodeid; | ||
| 200 | __fuse_get_request(req); | ||
| 201 | fuse_reset_request(req); | ||
| 202 | fuse_send_forget(fc, req, nodeid, 1); | ||
| 203 | putback = 0; | ||
| 204 | } | ||
| 205 | if (putback) | ||
| 206 | fuse_putback_request(fc, req); | ||
| 207 | } | 186 | } |
| 208 | 187 | ||
| 209 | /* | 188 | /* |
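request_end() no longer special-cases FUSE_INIT and the failed-CREATE FUSE_RELEASE by opcode. Instead a request may carry a completion callback in req->end: it runs after the waiter has been woken, with fuse_lock released, and it takes over responsibility for dropping the request reference (when no callback is set, request_end() calls fuse_put_request() itself). The users introduced by this patch are fuse_release_end() and fuse_readpages_end() in file.c and process_init_reply() in inode.c. A minimal sketch of the intended usage, with hypothetical names:

    /* Hypothetical asynchronous sender built on the req->end hook. */
    static void example_end(struct fuse_conn *fc, struct fuse_req *req)
    {
            /* called from request_end() without fuse_lock held */
            /* ... inspect req->out.h.error and the reply arguments ... */
            fuse_put_request(fc, req);   /* the callback owns the reference */
    }

    static void example_send_async(struct fuse_conn *fc, struct fuse_req *req)
    {
            req->end = example_end;      /* set before queueing the request */
            request_send_background(fc, req);
    }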
| @@ -254,14 +233,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | |||
| 254 | 233 | ||
| 255 | spin_unlock(&fuse_lock); | 234 | spin_unlock(&fuse_lock); |
| 256 | block_sigs(&oldset); | 235 | block_sigs(&oldset); |
| 257 | wait_event_interruptible(req->waitq, req->finished); | 236 | wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); |
| 258 | restore_sigs(&oldset); | 237 | restore_sigs(&oldset); |
| 259 | spin_lock(&fuse_lock); | 238 | spin_lock(&fuse_lock); |
| 260 | if (req->finished) | 239 | if (req->state == FUSE_REQ_FINISHED && !req->interrupted) |
| 261 | return; | 240 | return; |
| 262 | 241 | ||
| 263 | req->out.h.error = -EINTR; | 242 | if (!req->interrupted) { |
| 264 | req->interrupted = 1; | 243 | req->out.h.error = -EINTR; |
| 244 | req->interrupted = 1; | ||
| 245 | } | ||
| 265 | if (req->locked) { | 246 | if (req->locked) { |
| 266 | /* This is uninterruptible sleep, because data is | 247 | /* This is uninterruptible sleep, because data is |
| 267 | being copied to/from the buffers of req. During | 248 | being copied to/from the buffers of req. During |
| @@ -272,10 +253,10 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) | |||
| 272 | wait_event(req->waitq, !req->locked); | 253 | wait_event(req->waitq, !req->locked); |
| 273 | spin_lock(&fuse_lock); | 254 | spin_lock(&fuse_lock); |
| 274 | } | 255 | } |
| 275 | if (!req->sent && !list_empty(&req->list)) { | 256 | if (req->state == FUSE_REQ_PENDING) { |
| 276 | list_del(&req->list); | 257 | list_del(&req->list); |
| 277 | __fuse_put_request(req); | 258 | __fuse_put_request(req); |
| 278 | } else if (!req->finished && req->sent) | 259 | } else if (req->state == FUSE_REQ_SENT) |
| 279 | background_request(fc, req); | 260 | background_request(fc, req); |
| 280 | } | 261 | } |
| 281 | 262 | ||
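With the sent/finished bits folded into req->state, request_wait_answer() can tell the interesting cases apart after an interrupted sleep: a request still FUSE_REQ_PENDING is unlinked and its reference dropped, a FUSE_REQ_SENT request is moved to the background list so the eventual reply is still consumed, and a request whose buffers are being copied (req->locked) is first waited for uninterruptibly. The extra !req->interrupted test keeps a request that was force-finished by the new abort path from being mistaken for a normal completion.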
| @@ -310,6 +291,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req) | |||
| 310 | fc->outstanding_debt++; | 291 | fc->outstanding_debt++; |
| 311 | } | 292 | } |
| 312 | list_add_tail(&req->list, &fc->pending); | 293 | list_add_tail(&req->list, &fc->pending); |
| 294 | req->state = FUSE_REQ_PENDING; | ||
| 313 | wake_up(&fc->waitq); | 295 | wake_up(&fc->waitq); |
| 314 | } | 296 | } |
| 315 | 297 | ||
| @@ -362,34 +344,12 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req) | |||
| 362 | request_send_nowait(fc, req); | 344 | request_send_nowait(fc, req); |
| 363 | } | 345 | } |
| 364 | 346 | ||
| 365 | void fuse_send_init(struct fuse_conn *fc) | ||
| 366 | { | ||
| 367 | /* This is called from fuse_read_super() so there's guaranteed | ||
| 368 | to be a request available */ | ||
| 369 | struct fuse_req *req = do_get_request(fc); | ||
| 370 | struct fuse_init_in *arg = &req->misc.init_in; | ||
| 371 | arg->major = FUSE_KERNEL_VERSION; | ||
| 372 | arg->minor = FUSE_KERNEL_MINOR_VERSION; | ||
| 373 | req->in.h.opcode = FUSE_INIT; | ||
| 374 | req->in.numargs = 1; | ||
| 375 | req->in.args[0].size = sizeof(*arg); | ||
| 376 | req->in.args[0].value = arg; | ||
| 377 | req->out.numargs = 1; | ||
| 378 | /* Variable length arguement used for backward compatibility | ||
| 379 | with interface version < 7.5. Rest of init_out is zeroed | ||
| 380 | by do_get_request(), so a short reply is not a problem */ | ||
| 381 | req->out.argvar = 1; | ||
| 382 | req->out.args[0].size = sizeof(struct fuse_init_out); | ||
| 383 | req->out.args[0].value = &req->misc.init_out; | ||
| 384 | request_send_background(fc, req); | ||
| 385 | } | ||
| 386 | |||
| 387 | /* | 347 | /* |
| 388 | * Lock the request. Up to the next unlock_request() there mustn't be | 348 | * Lock the request. Up to the next unlock_request() there mustn't be |
| 389 | * anything that could cause a page-fault. If the request was already | 349 | * anything that could cause a page-fault. If the request was already |
| 390 | * interrupted bail out. | 350 | * interrupted bail out. |
| 391 | */ | 351 | */ |
| 392 | static inline int lock_request(struct fuse_req *req) | 352 | static int lock_request(struct fuse_req *req) |
| 393 | { | 353 | { |
| 394 | int err = 0; | 354 | int err = 0; |
| 395 | if (req) { | 355 | if (req) { |
| @@ -408,7 +368,7 @@ static inline int lock_request(struct fuse_req *req) | |||
| 408 | * requester thread is currently waiting for it to be unlocked, so | 368 | * requester thread is currently waiting for it to be unlocked, so |
| 409 | * wake it up. | 369 | * wake it up. |
| 410 | */ | 370 | */ |
| 411 | static inline void unlock_request(struct fuse_req *req) | 371 | static void unlock_request(struct fuse_req *req) |
| 412 | { | 372 | { |
| 413 | if (req) { | 373 | if (req) { |
| 414 | spin_lock(&fuse_lock); | 374 | spin_lock(&fuse_lock); |
| @@ -444,7 +404,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write, | |||
| 444 | } | 404 | } |
| 445 | 405 | ||
| 446 | /* Unmap and put previous page of userspace buffer */ | 406 | /* Unmap and put previous page of userspace buffer */ |
| 447 | static inline void fuse_copy_finish(struct fuse_copy_state *cs) | 407 | static void fuse_copy_finish(struct fuse_copy_state *cs) |
| 448 | { | 408 | { |
| 449 | if (cs->mapaddr) { | 409 | if (cs->mapaddr) { |
| 450 | kunmap_atomic(cs->mapaddr, KM_USER0); | 410 | kunmap_atomic(cs->mapaddr, KM_USER0); |
| @@ -493,8 +453,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) | |||
| 493 | } | 453 | } |
| 494 | 454 | ||
| 495 | /* Do as much copy to/from userspace buffer as we can */ | 455 | /* Do as much copy to/from userspace buffer as we can */ |
| 496 | static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val, | 456 | static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) |
| 497 | unsigned *size) | ||
| 498 | { | 457 | { |
| 499 | unsigned ncpy = min(*size, cs->len); | 458 | unsigned ncpy = min(*size, cs->len); |
| 500 | if (val) { | 459 | if (val) { |
| @@ -514,8 +473,8 @@ static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val, | |||
| 514 | * Copy a page in the request to/from the userspace buffer. Must be | 473 | * Copy a page in the request to/from the userspace buffer. Must be |
| 515 | * done atomically | 474 | * done atomically |
| 516 | */ | 475 | */ |
| 517 | static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, | 476 | static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, |
| 518 | unsigned offset, unsigned count, int zeroing) | 477 | unsigned offset, unsigned count, int zeroing) |
| 519 | { | 478 | { |
| 520 | if (page && zeroing && count < PAGE_SIZE) { | 479 | if (page && zeroing && count < PAGE_SIZE) { |
| 521 | void *mapaddr = kmap_atomic(page, KM_USER1); | 480 | void *mapaddr = kmap_atomic(page, KM_USER1); |
| @@ -597,7 +556,7 @@ static void request_wait(struct fuse_conn *fc) | |||
| 597 | DECLARE_WAITQUEUE(wait, current); | 556 | DECLARE_WAITQUEUE(wait, current); |
| 598 | 557 | ||
| 599 | add_wait_queue_exclusive(&fc->waitq, &wait); | 558 | add_wait_queue_exclusive(&fc->waitq, &wait); |
| 600 | while (fc->mounted && list_empty(&fc->pending)) { | 559 | while (fc->connected && list_empty(&fc->pending)) { |
| 601 | set_current_state(TASK_INTERRUPTIBLE); | 560 | set_current_state(TASK_INTERRUPTIBLE); |
| 602 | if (signal_pending(current)) | 561 | if (signal_pending(current)) |
| 603 | break; | 562 | break; |
| @@ -637,14 +596,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, | |||
| 637 | goto err_unlock; | 596 | goto err_unlock; |
| 638 | request_wait(fc); | 597 | request_wait(fc); |
| 639 | err = -ENODEV; | 598 | err = -ENODEV; |
| 640 | if (!fc->mounted) | 599 | if (!fc->connected) |
| 641 | goto err_unlock; | 600 | goto err_unlock; |
| 642 | err = -ERESTARTSYS; | 601 | err = -ERESTARTSYS; |
| 643 | if (list_empty(&fc->pending)) | 602 | if (list_empty(&fc->pending)) |
| 644 | goto err_unlock; | 603 | goto err_unlock; |
| 645 | 604 | ||
| 646 | req = list_entry(fc->pending.next, struct fuse_req, list); | 605 | req = list_entry(fc->pending.next, struct fuse_req, list); |
| 647 | list_del_init(&req->list); | 606 | req->state = FUSE_REQ_READING; |
| 607 | list_move(&req->list, &fc->io); | ||
| 648 | 608 | ||
| 649 | in = &req->in; | 609 | in = &req->in; |
| 650 | reqsize = in->h.len; | 610 | reqsize = in->h.len; |
| @@ -677,8 +637,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov, | |||
| 677 | if (!req->isreply) | 637 | if (!req->isreply) |
| 678 | request_end(fc, req); | 638 | request_end(fc, req); |
| 679 | else { | 639 | else { |
| 680 | req->sent = 1; | 640 | req->state = FUSE_REQ_SENT; |
| 681 | list_add_tail(&req->list, &fc->processing); | 641 | list_move_tail(&req->list, &fc->processing); |
| 682 | spin_unlock(&fuse_lock); | 642 | spin_unlock(&fuse_lock); |
| 683 | } | 643 | } |
| 684 | return reqsize; | 644 | return reqsize; |
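While the daemon is copying a request out through the device, the request now sits on the new fc->io list in state FUSE_REQ_READING instead of being dropped from every list; only after the copy succeeds is it moved to fc->processing and marked FUSE_REQ_SENT. Keeping in-flight copies on a list is what lets end_io_requests(), added further down, find and abort requests caught in the middle of a copy.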
| @@ -766,17 +726,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov, | |||
| 766 | goto err_finish; | 726 | goto err_finish; |
| 767 | 727 | ||
| 768 | spin_lock(&fuse_lock); | 728 | spin_lock(&fuse_lock); |
| 729 | err = -ENOENT; | ||
| 730 | if (!fc->connected) | ||
| 731 | goto err_unlock; | ||
| 732 | |||
| 769 | req = request_find(fc, oh.unique); | 733 | req = request_find(fc, oh.unique); |
| 770 | err = -EINVAL; | 734 | err = -EINVAL; |
| 771 | if (!req) | 735 | if (!req) |
| 772 | goto err_unlock; | 736 | goto err_unlock; |
| 773 | 737 | ||
| 774 | list_del_init(&req->list); | ||
| 775 | if (req->interrupted) { | 738 | if (req->interrupted) { |
| 776 | request_end(fc, req); | 739 | spin_unlock(&fuse_lock); |
| 777 | fuse_copy_finish(&cs); | 740 | fuse_copy_finish(&cs); |
| 741 | spin_lock(&fuse_lock); | ||
| 742 | request_end(fc, req); | ||
| 778 | return -ENOENT; | 743 | return -ENOENT; |
| 779 | } | 744 | } |
| 745 | list_move(&req->list, &fc->io); | ||
| 780 | req->out.h = oh; | 746 | req->out.h = oh; |
| 781 | req->locked = 1; | 747 | req->locked = 1; |
| 782 | cs.req = req; | 748 | cs.req = req; |
| @@ -830,19 +796,90 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) | |||
| 830 | return mask; | 796 | return mask; |
| 831 | } | 797 | } |
| 832 | 798 | ||
| 833 | /* Abort all requests on the given list (pending or processing) */ | 799 | /* |
| 800 | * Abort all requests on the given list (pending or processing) | ||
| 801 | * | ||
| 802 | * This function releases and reacquires fuse_lock | ||
| 803 | */ | ||
| 834 | static void end_requests(struct fuse_conn *fc, struct list_head *head) | 804 | static void end_requests(struct fuse_conn *fc, struct list_head *head) |
| 835 | { | 805 | { |
| 836 | while (!list_empty(head)) { | 806 | while (!list_empty(head)) { |
| 837 | struct fuse_req *req; | 807 | struct fuse_req *req; |
| 838 | req = list_entry(head->next, struct fuse_req, list); | 808 | req = list_entry(head->next, struct fuse_req, list); |
| 839 | list_del_init(&req->list); | ||
| 840 | req->out.h.error = -ECONNABORTED; | 809 | req->out.h.error = -ECONNABORTED; |
| 841 | request_end(fc, req); | 810 | request_end(fc, req); |
| 842 | spin_lock(&fuse_lock); | 811 | spin_lock(&fuse_lock); |
| 843 | } | 812 | } |
| 844 | } | 813 | } |
| 845 | 814 | ||
| 815 | /* | ||
| 816 | * Abort requests under I/O | ||
| 817 | * | ||
| 818 | * The requests are set to interrupted and finished, and the request | ||
| 819 | * waiter is woken up. This will make request_wait_answer() wait | ||
| 820 | * until the request is unlocked and then return. | ||
| 821 | * | ||
| 822 | * If the request is asynchronous, then the end function needs to be | ||
| 823 | * called after waiting for the request to be unlocked (if it was | ||
| 824 | * locked). | ||
| 825 | */ | ||
| 826 | static void end_io_requests(struct fuse_conn *fc) | ||
| 827 | { | ||
| 828 | while (!list_empty(&fc->io)) { | ||
| 829 | struct fuse_req *req = | ||
| 830 | list_entry(fc->io.next, struct fuse_req, list); | ||
| 831 | void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; | ||
| 832 | |||
| 833 | req->interrupted = 1; | ||
| 834 | req->out.h.error = -ECONNABORTED; | ||
| 835 | req->state = FUSE_REQ_FINISHED; | ||
| 836 | list_del_init(&req->list); | ||
| 837 | wake_up(&req->waitq); | ||
| 838 | if (end) { | ||
| 839 | req->end = NULL; | ||
| 840 | /* The end function will consume this reference */ | ||
| 841 | __fuse_get_request(req); | ||
| 842 | spin_unlock(&fuse_lock); | ||
| 843 | wait_event(req->waitq, !req->locked); | ||
| 844 | end(fc, req); | ||
| 845 | spin_lock(&fuse_lock); | ||
| 846 | } | ||
| 847 | } | ||
| 848 | } | ||
| 849 | |||
| 850 | /* | ||
| 851 | * Abort all requests. | ||
| 852 | * | ||
| 853 | * Emergency exit in case of a malicious or accidental deadlock, or | ||
| 854 | * just a hung filesystem. | ||
| 855 | * | ||
| 856 | * The same effect is usually achievable through killing the | ||
| 857 | * filesystem daemon and all users of the filesystem. The exception | ||
| 858 | * is the combination of an asynchronous request and the tricky | ||
| 859 | * deadlock (see Documentation/filesystems/fuse.txt). | ||
| 860 | * | ||
| 861 | * During the aborting, progression of requests from the pending and | ||
| 862 | * processing lists onto the io list, and progression of new requests | ||
| 863 | * onto the pending list is prevented by req->connected being false. | ||
| 864 | * | ||
| 865 | * Progression of requests under I/O to the processing list is | ||
| 866 | * prevented by the req->interrupted flag being true for these | ||
| 867 | * requests. For this reason requests on the io list must be aborted | ||
| 868 | * first. | ||
| 869 | */ | ||
| 870 | void fuse_abort_conn(struct fuse_conn *fc) | ||
| 871 | { | ||
| 872 | spin_lock(&fuse_lock); | ||
| 873 | if (fc->connected) { | ||
| 874 | fc->connected = 0; | ||
| 875 | end_io_requests(fc); | ||
| 876 | end_requests(fc, &fc->pending); | ||
| 877 | end_requests(fc, &fc->processing); | ||
| 878 | wake_up_all(&fc->waitq); | ||
| 879 | } | ||
| 880 | spin_unlock(&fuse_lock); | ||
| 881 | } | ||
| 882 | |||
| 846 | static int fuse_dev_release(struct inode *inode, struct file *file) | 883 | static int fuse_dev_release(struct inode *inode, struct file *file) |
| 847 | { | 884 | { |
| 848 | struct fuse_conn *fc; | 885 | struct fuse_conn *fc; |
| @@ -853,9 +890,11 @@ static int fuse_dev_release(struct inode *inode, struct file *file) | |||
| 853 | fc->connected = 0; | 890 | fc->connected = 0; |
| 854 | end_requests(fc, &fc->pending); | 891 | end_requests(fc, &fc->pending); |
| 855 | end_requests(fc, &fc->processing); | 892 | end_requests(fc, &fc->processing); |
| 856 | fuse_release_conn(fc); | ||
| 857 | } | 893 | } |
| 858 | spin_unlock(&fuse_lock); | 894 | spin_unlock(&fuse_lock); |
| 895 | if (fc) | ||
| 896 | kobject_put(&fc->kobj); | ||
| 897 | |||
| 859 | return 0; | 898 | return 0; |
| 860 | } | 899 | } |
| 861 | 900 | ||
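fuse_abort_conn() is the heart of the forced-abort path: it clears fc->connected so nothing new can be queued or picked up, force-finishes the requests under I/O first, then fails everything still pending or processing with -ECONNABORTED, and wakes all readers of the device. fuse_dev_release() now only marks the connection dead and fails the queued requests; the fuse_conn itself is refcounted through its embedded kobject, and kobject_put() is called outside fuse_lock. The abort is exposed to the administrator through the new 'abort' sysfs attribute added in inode.c below.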
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 417bcee466f6..21fd59c7bc24 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
| @@ -23,8 +23,7 @@ | |||
| 23 | /* | 23 | /* |
| 24 | * Calculate the time in jiffies until a dentry/attributes are valid | 24 | * Calculate the time in jiffies until a dentry/attributes are valid |
| 25 | */ | 25 | */ |
| 26 | static inline unsigned long time_to_jiffies(unsigned long sec, | 26 | static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec) |
| 27 | unsigned long nsec) | ||
| 28 | { | 27 | { |
| 29 | struct timespec ts = {sec, nsec}; | 28 | struct timespec ts = {sec, nsec}; |
| 30 | return jiffies + timespec_to_jiffies(&ts); | 29 | return jiffies + timespec_to_jiffies(&ts); |
| @@ -157,7 +156,7 @@ static int dir_alias(struct inode *inode) | |||
| 157 | return 0; | 156 | return 0; |
| 158 | } | 157 | } |
| 159 | 158 | ||
| 160 | static inline int invalid_nodeid(u64 nodeid) | 159 | static int invalid_nodeid(u64 nodeid) |
| 161 | { | 160 | { |
| 162 | return !nodeid || nodeid == FUSE_ROOT_ID; | 161 | return !nodeid || nodeid == FUSE_ROOT_ID; |
| 163 | } | 162 | } |
| @@ -166,7 +165,7 @@ static struct dentry_operations fuse_dentry_operations = { | |||
| 166 | .d_revalidate = fuse_dentry_revalidate, | 165 | .d_revalidate = fuse_dentry_revalidate, |
| 167 | }; | 166 | }; |
| 168 | 167 | ||
| 169 | static inline int valid_mode(int m) | 168 | static int valid_mode(int m) |
| 170 | { | 169 | { |
| 171 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || | 170 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || |
| 172 | S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); | 171 | S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m); |
| @@ -763,13 +762,6 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file, | |||
| 763 | return 0; | 762 | return 0; |
| 764 | } | 763 | } |
| 765 | 764 | ||
| 766 | static inline size_t fuse_send_readdir(struct fuse_req *req, struct file *file, | ||
| 767 | struct inode *inode, loff_t pos, | ||
| 768 | size_t count) | ||
| 769 | { | ||
| 770 | return fuse_send_read_common(req, file, inode, pos, count, 1); | ||
| 771 | } | ||
| 772 | |||
| 773 | static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) | 765 | static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) |
| 774 | { | 766 | { |
| 775 | int err; | 767 | int err; |
| @@ -793,7 +785,9 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) | |||
| 793 | } | 785 | } |
| 794 | req->num_pages = 1; | 786 | req->num_pages = 1; |
| 795 | req->pages[0] = page; | 787 | req->pages[0] = page; |
| 796 | nbytes = fuse_send_readdir(req, file, inode, file->f_pos, PAGE_SIZE); | 788 | fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR); |
| 789 | request_send(fc, req); | ||
| 790 | nbytes = req->out.args[0].size; | ||
| 797 | err = req->out.h.error; | 791 | err = req->out.h.error; |
| 798 | fuse_put_request(fc, req); | 792 | fuse_put_request(fc, req); |
| 799 | if (!err) | 793 | if (!err) |
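fuse_readdir() drops its private fuse_send_readdir() wrapper: it now fills the request itself via fuse_read_fill(..., FUSE_READDIR), sends it synchronously with request_send(), and takes the number of bytes actually returned from req->out.args[0].size before handing the page to parse_dirfile(). The fill/send split itself is introduced in the file.c hunks below.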
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 63d2980df5c9..a7ef5e716f3c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
| @@ -113,6 +113,14 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) | |||
| 113 | return err; | 113 | return err; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | /* Special case for failed iget in CREATE */ | ||
| 117 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) | ||
| 118 | { | ||
| 119 | u64 nodeid = req->in.h.nodeid; | ||
| 120 | fuse_reset_request(req); | ||
| 121 | fuse_send_forget(fc, req, nodeid, 1); | ||
| 122 | } | ||
| 123 | |||
| 116 | void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, | 124 | void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, |
| 117 | u64 nodeid, struct inode *inode, int flags, int isdir) | 125 | u64 nodeid, struct inode *inode, int flags, int isdir) |
| 118 | { | 126 | { |
| @@ -128,6 +136,8 @@ void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, | |||
| 128 | req->in.args[0].size = sizeof(struct fuse_release_in); | 136 | req->in.args[0].size = sizeof(struct fuse_release_in); |
| 129 | req->in.args[0].value = inarg; | 137 | req->in.args[0].value = inarg; |
| 130 | request_send_background(fc, req); | 138 | request_send_background(fc, req); |
| 139 | if (!inode) | ||
| 140 | req->end = fuse_release_end; | ||
| 131 | kfree(ff); | 141 | kfree(ff); |
| 132 | } | 142 | } |
| 133 | 143 | ||
| @@ -240,38 +250,35 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync) | |||
| 240 | return fuse_fsync_common(file, de, datasync, 0); | 250 | return fuse_fsync_common(file, de, datasync, 0); |
| 241 | } | 251 | } |
| 242 | 252 | ||
| 243 | size_t fuse_send_read_common(struct fuse_req *req, struct file *file, | 253 | void fuse_read_fill(struct fuse_req *req, struct file *file, |
| 244 | struct inode *inode, loff_t pos, size_t count, | 254 | struct inode *inode, loff_t pos, size_t count, int opcode) |
| 245 | int isdir) | ||
| 246 | { | 255 | { |
| 247 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
| 248 | struct fuse_file *ff = file->private_data; | 256 | struct fuse_file *ff = file->private_data; |
| 249 | struct fuse_read_in inarg; | 257 | struct fuse_read_in *inarg = &req->misc.read_in; |
| 250 | 258 | ||
| 251 | memset(&inarg, 0, sizeof(struct fuse_read_in)); | 259 | inarg->fh = ff->fh; |
| 252 | inarg.fh = ff->fh; | 260 | inarg->offset = pos; |
| 253 | inarg.offset = pos; | 261 | inarg->size = count; |
| 254 | inarg.size = count; | 262 | req->in.h.opcode = opcode; |
| 255 | req->in.h.opcode = isdir ? FUSE_READDIR : FUSE_READ; | ||
| 256 | req->in.h.nodeid = get_node_id(inode); | 263 | req->in.h.nodeid = get_node_id(inode); |
| 257 | req->inode = inode; | 264 | req->inode = inode; |
| 258 | req->file = file; | 265 | req->file = file; |
| 259 | req->in.numargs = 1; | 266 | req->in.numargs = 1; |
| 260 | req->in.args[0].size = sizeof(struct fuse_read_in); | 267 | req->in.args[0].size = sizeof(struct fuse_read_in); |
| 261 | req->in.args[0].value = &inarg; | 268 | req->in.args[0].value = inarg; |
| 262 | req->out.argpages = 1; | 269 | req->out.argpages = 1; |
| 263 | req->out.argvar = 1; | 270 | req->out.argvar = 1; |
| 264 | req->out.numargs = 1; | 271 | req->out.numargs = 1; |
| 265 | req->out.args[0].size = count; | 272 | req->out.args[0].size = count; |
| 266 | request_send(fc, req); | ||
| 267 | return req->out.args[0].size; | ||
| 268 | } | 273 | } |
| 269 | 274 | ||
| 270 | static inline size_t fuse_send_read(struct fuse_req *req, struct file *file, | 275 | static size_t fuse_send_read(struct fuse_req *req, struct file *file, |
| 271 | struct inode *inode, loff_t pos, | 276 | struct inode *inode, loff_t pos, size_t count) |
| 272 | size_t count) | ||
| 273 | { | 277 | { |
| 274 | return fuse_send_read_common(req, file, inode, pos, count, 0); | 278 | struct fuse_conn *fc = get_fuse_conn(inode); |
| 279 | fuse_read_fill(req, file, inode, pos, count, FUSE_READ); | ||
| 280 | request_send(fc, req); | ||
| 281 | return req->out.args[0].size; | ||
| 275 | } | 282 | } |
| 276 | 283 | ||
| 277 | static int fuse_readpage(struct file *file, struct page *page) | 284 | static int fuse_readpage(struct file *file, struct page *page) |
| @@ -304,21 +311,33 @@ static int fuse_readpage(struct file *file, struct page *page) | |||
| 304 | return err; | 311 | return err; |
| 305 | } | 312 | } |
| 306 | 313 | ||
| 307 | static int fuse_send_readpages(struct fuse_req *req, struct file *file, | 314 | static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) |
| 308 | struct inode *inode) | ||
| 309 | { | 315 | { |
| 310 | loff_t pos = page_offset(req->pages[0]); | 316 | int i; |
| 311 | size_t count = req->num_pages << PAGE_CACHE_SHIFT; | 317 | |
| 312 | unsigned i; | 318 | fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */ |
| 313 | req->out.page_zeroing = 1; | 319 | |
| 314 | fuse_send_read(req, file, inode, pos, count); | ||
| 315 | for (i = 0; i < req->num_pages; i++) { | 320 | for (i = 0; i < req->num_pages; i++) { |
| 316 | struct page *page = req->pages[i]; | 321 | struct page *page = req->pages[i]; |
| 317 | if (!req->out.h.error) | 322 | if (!req->out.h.error) |
| 318 | SetPageUptodate(page); | 323 | SetPageUptodate(page); |
| 324 | else | ||
| 325 | SetPageError(page); | ||
| 319 | unlock_page(page); | 326 | unlock_page(page); |
| 320 | } | 327 | } |
| 321 | return req->out.h.error; | 328 | fuse_put_request(fc, req); |
| 329 | } | ||
| 330 | |||
| 331 | static void fuse_send_readpages(struct fuse_req *req, struct file *file, | ||
| 332 | struct inode *inode) | ||
| 333 | { | ||
| 334 | struct fuse_conn *fc = get_fuse_conn(inode); | ||
| 335 | loff_t pos = page_offset(req->pages[0]); | ||
| 336 | size_t count = req->num_pages << PAGE_CACHE_SHIFT; | ||
| 337 | req->out.page_zeroing = 1; | ||
| 338 | req->end = fuse_readpages_end; | ||
| 339 | fuse_read_fill(req, file, inode, pos, count, FUSE_READ); | ||
| 340 | request_send_background(fc, req); | ||
| 322 | } | 341 | } |
| 323 | 342 | ||
| 324 | struct fuse_readpages_data { | 343 | struct fuse_readpages_data { |
| @@ -338,12 +357,12 @@ static int fuse_readpages_fill(void *_data, struct page *page) | |||
| 338 | (req->num_pages == FUSE_MAX_PAGES_PER_REQ || | 357 | (req->num_pages == FUSE_MAX_PAGES_PER_REQ || |
| 339 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || | 358 | (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || |
| 340 | req->pages[req->num_pages - 1]->index + 1 != page->index)) { | 359 | req->pages[req->num_pages - 1]->index + 1 != page->index)) { |
| 341 | int err = fuse_send_readpages(req, data->file, inode); | 360 | fuse_send_readpages(req, data->file, inode); |
| 342 | if (err) { | 361 | data->req = req = fuse_get_request(fc); |
| 362 | if (!req) { | ||
| 343 | unlock_page(page); | 363 | unlock_page(page); |
| 344 | return err; | 364 | return -EINTR; |
| 345 | } | 365 | } |
| 346 | fuse_reset_request(req); | ||
| 347 | } | 366 | } |
| 348 | req->pages[req->num_pages] = page; | 367 | req->pages[req->num_pages] = page; |
| 349 | req->num_pages ++; | 368 | req->num_pages ++; |
| @@ -368,10 +387,8 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, | |||
| 368 | return -EINTR; | 387 | return -EINTR; |
| 369 | 388 | ||
| 370 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); | 389 | err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); |
| 371 | if (!err && data.req->num_pages) | 390 | if (!err) |
| 372 | err = fuse_send_readpages(data.req, file, inode); | 391 | fuse_send_readpages(data.req, file, inode); |
| 373 | fuse_put_request(fc, data.req); | ||
| 374 | fuse_invalidate_attr(inode); /* atime changed */ | ||
| 375 | return err; | 392 | return err; |
| 376 | } | 393 | } |
| 377 | 394 | ||
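Readahead becomes fully asynchronous: fuse_send_readpages() queues the request with request_send_background() and returns, and fuse_readpages_end() later marks each page up to date or in error, unlocks it, invalidates the inode's cached attributes (atime changed), and drops the request reference. If a follow-up request cannot be allocated in fuse_readpages_fill(), the current page is unlocked and -EINTR is returned, stopping read_cache_pages() early.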
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 74c8d098a14a..46cf933aa3bf 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
| @@ -94,6 +94,11 @@ struct fuse_out { | |||
| 94 | /** Header returned from userspace */ | 94 | /** Header returned from userspace */ |
| 95 | struct fuse_out_header h; | 95 | struct fuse_out_header h; |
| 96 | 96 | ||
| 97 | /* | ||
| 98 | * The following bitfields are not changed during the request | ||
| 99 | * processing | ||
| 100 | */ | ||
| 101 | |||
| 97 | /** Last argument is variable length (can be shorter than | 102 | /** Last argument is variable length (can be shorter than |
| 98 | arg->size) */ | 103 | arg->size) */ |
| 99 | unsigned argvar:1; | 104 | unsigned argvar:1; |
| @@ -111,12 +116,23 @@ struct fuse_out { | |||
| 111 | struct fuse_arg args[3]; | 116 | struct fuse_arg args[3]; |
| 112 | }; | 117 | }; |
| 113 | 118 | ||
| 119 | /** The request state */ | ||
| 120 | enum fuse_req_state { | ||
| 121 | FUSE_REQ_INIT = 0, | ||
| 122 | FUSE_REQ_PENDING, | ||
| 123 | FUSE_REQ_READING, | ||
| 124 | FUSE_REQ_SENT, | ||
| 125 | FUSE_REQ_FINISHED | ||
| 126 | }; | ||
| 127 | |||
| 128 | struct fuse_conn; | ||
| 129 | |||
| 114 | /** | 130 | /** |
| 115 | * A request to the client | 131 | * A request to the client |
| 116 | */ | 132 | */ |
| 117 | struct fuse_req { | 133 | struct fuse_req { |
| 118 | /** This can be on either unused_list, pending or processing | 134 | /** This can be on either unused_list, pending processing or |
| 119 | lists in fuse_conn */ | 135 | io lists in fuse_conn */ |
| 120 | struct list_head list; | 136 | struct list_head list; |
| 121 | 137 | ||
| 122 | /** Entry on the background list */ | 138 | /** Entry on the background list */ |
| @@ -125,6 +141,12 @@ struct fuse_req { | |||
| 125 | /** refcount */ | 141 | /** refcount */ |
| 126 | atomic_t count; | 142 | atomic_t count; |
| 127 | 143 | ||
| 144 | /* | ||
| 145 | * The following bitfields are either set once before the | ||
| 146 | * request is queued or setting/clearing them is protected by | ||
| 147 | * fuse_lock | ||
| 148 | */ | ||
| 149 | |||
| 128 | /** True if the request has reply */ | 150 | /** True if the request has reply */ |
| 129 | unsigned isreply:1; | 151 | unsigned isreply:1; |
| 130 | 152 | ||
| @@ -140,11 +162,8 @@ struct fuse_req { | |||
| 140 | /** Data is being copied to/from the request */ | 162 | /** Data is being copied to/from the request */ |
| 141 | unsigned locked:1; | 163 | unsigned locked:1; |
| 142 | 164 | ||
| 143 | /** Request has been sent to userspace */ | 165 | /** State of the request */ |
| 144 | unsigned sent:1; | 166 | enum fuse_req_state state; |
| 145 | |||
| 146 | /** The request is finished */ | ||
| 147 | unsigned finished:1; | ||
| 148 | 167 | ||
| 149 | /** The request input */ | 168 | /** The request input */ |
| 150 | struct fuse_in in; | 169 | struct fuse_in in; |
| @@ -161,6 +180,7 @@ struct fuse_req { | |||
| 161 | struct fuse_release_in release_in; | 180 | struct fuse_release_in release_in; |
| 162 | struct fuse_init_in init_in; | 181 | struct fuse_init_in init_in; |
| 163 | struct fuse_init_out init_out; | 182 | struct fuse_init_out init_out; |
| 183 | struct fuse_read_in read_in; | ||
| 164 | } misc; | 184 | } misc; |
| 165 | 185 | ||
| 166 | /** page vector */ | 186 | /** page vector */ |
| @@ -180,6 +200,9 @@ struct fuse_req { | |||
| 180 | 200 | ||
| 181 | /** File used in the request (or NULL) */ | 201 | /** File used in the request (or NULL) */ |
| 182 | struct file *file; | 202 | struct file *file; |
| 203 | |||
| 204 | /** Request completion callback */ | ||
| 205 | void (*end)(struct fuse_conn *, struct fuse_req *); | ||
| 183 | }; | 206 | }; |
| 184 | 207 | ||
| 185 | /** | 208 | /** |
| @@ -190,9 +213,6 @@ struct fuse_req { | |||
| 190 | * unmounted. | 213 | * unmounted. |
| 191 | */ | 214 | */ |
| 192 | struct fuse_conn { | 215 | struct fuse_conn { |
| 193 | /** Reference count */ | ||
| 194 | int count; | ||
| 195 | |||
| 196 | /** The user id for this mount */ | 216 | /** The user id for this mount */ |
| 197 | uid_t user_id; | 217 | uid_t user_id; |
| 198 | 218 | ||
| @@ -217,6 +237,9 @@ struct fuse_conn { | |||
| 217 | /** The list of requests being processed */ | 237 | /** The list of requests being processed */ |
| 218 | struct list_head processing; | 238 | struct list_head processing; |
| 219 | 239 | ||
| 240 | /** The list of requests under I/O */ | ||
| 241 | struct list_head io; | ||
| 242 | |||
| 220 | /** Requests put in the background (RELEASE or any other | 243 | /** Requests put in the background (RELEASE or any other |
| 221 | interrupted request) */ | 244 | interrupted request) */ |
| 222 | struct list_head background; | 245 | struct list_head background; |
| @@ -238,14 +261,22 @@ struct fuse_conn { | |||
| 238 | u64 reqctr; | 261 | u64 reqctr; |
| 239 | 262 | ||
| 240 | /** Mount is active */ | 263 | /** Mount is active */ |
| 241 | unsigned mounted : 1; | 264 | unsigned mounted; |
| 242 | 265 | ||
| 243 | /** Connection established */ | 266 | /** Connection established, cleared on umount, connection |
| 244 | unsigned connected : 1; | 267 | abort and device release */ |
| 268 | unsigned connected; | ||
| 245 | 269 | ||
| 246 | /** Connection failed (version mismatch) */ | 270 | /** Connection failed (version mismatch). Cannot race with |
| 271 | setting other bitfields since it is only set once in INIT | ||
| 272 | reply, before any other request, and never cleared */ | ||
| 247 | unsigned conn_error : 1; | 273 | unsigned conn_error : 1; |
| 248 | 274 | ||
| 275 | /* | ||
| 276 | * The following bitfields are only for optimization purposes | ||
| 277 | * and hence races in setting them will not cause malfunction | ||
| 278 | */ | ||
| 279 | |||
| 249 | /** Is fsync not implemented by fs? */ | 280 | /** Is fsync not implemented by fs? */ |
| 250 | unsigned no_fsync : 1; | 281 | unsigned no_fsync : 1; |
| 251 | 282 | ||
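mounted and connected stop being single-bit fields: they are flipped under fuse_lock from several places (umount, device release, abort), while the no_* feature bits below are set locklessly, and a read-modify-write of one member of a shared bitfield word can silently undo a concurrent update of its neighbour. The lock-protected flags therefore get whole words of their own, and the remaining bitfields are explicitly documented as optimization-only. Purely illustrative of the hazard, not code from the patch:

    struct flags_demo {
            unsigned mounted:1;     /* updated under a lock               */
            unsigned no_fsync:1;    /* updated locklessly on another path */
            /* Both bits live in one word: storing no_fsync is a load/modify/
             * store of that word and can overwrite a concurrent update of
             * mounted.  Giving mounted its own 'unsigned' avoids this. */
    };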
| @@ -273,21 +304,22 @@ struct fuse_conn { | |||
| 273 | /** Is create not implemented by fs? */ | 304 | /** Is create not implemented by fs? */ |
| 274 | unsigned no_create : 1; | 305 | unsigned no_create : 1; |
| 275 | 306 | ||
| 307 | /** The number of requests waiting for completion */ | ||
| 308 | atomic_t num_waiting; | ||
| 309 | |||
| 276 | /** Negotiated minor version */ | 310 | /** Negotiated minor version */ |
| 277 | unsigned minor; | 311 | unsigned minor; |
| 278 | 312 | ||
| 279 | /** Backing dev info */ | 313 | /** Backing dev info */ |
| 280 | struct backing_dev_info bdi; | 314 | struct backing_dev_info bdi; |
| 281 | }; | ||
| 282 | 315 | ||
| 283 | static inline struct fuse_conn **get_fuse_conn_super_p(struct super_block *sb) | 316 | /** kobject */ |
| 284 | { | 317 | struct kobject kobj; |
| 285 | return (struct fuse_conn **) &sb->s_fs_info; | 318 | }; |
| 286 | } | ||
| 287 | 319 | ||
| 288 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) | 320 | static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) |
| 289 | { | 321 | { |
| 290 | return *get_fuse_conn_super_p(sb); | 322 | return sb->s_fs_info; |
| 291 | } | 323 | } |
| 292 | 324 | ||
| 293 | static inline struct fuse_conn *get_fuse_conn(struct inode *inode) | 325 | static inline struct fuse_conn *get_fuse_conn(struct inode *inode) |
| @@ -295,6 +327,11 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode) | |||
| 295 | return get_fuse_conn_super(inode->i_sb); | 327 | return get_fuse_conn_super(inode->i_sb); |
| 296 | } | 328 | } |
| 297 | 329 | ||
| 330 | static inline struct fuse_conn *get_fuse_conn_kobj(struct kobject *obj) | ||
| 331 | { | ||
| 332 | return container_of(obj, struct fuse_conn, kobj); | ||
| 333 | } | ||
| 334 | |||
| 298 | static inline struct fuse_inode *get_fuse_inode(struct inode *inode) | 335 | static inline struct fuse_inode *get_fuse_inode(struct inode *inode) |
| 299 | { | 336 | { |
| 300 | return container_of(inode, struct fuse_inode, inode); | 337 | return container_of(inode, struct fuse_inode, inode); |
| @@ -336,11 +373,10 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, | |||
| 336 | unsigned long nodeid, u64 nlookup); | 373 | unsigned long nodeid, u64 nlookup); |
| 337 | 374 | ||
| 338 | /** | 375 | /** |
| 339 | * Send READ or READDIR request | 376 | * Initialize READ or READDIR request |
| 340 | */ | 377 | */ |
| 341 | size_t fuse_send_read_common(struct fuse_req *req, struct file *file, | 378 | void fuse_read_fill(struct fuse_req *req, struct file *file, |
| 342 | struct inode *inode, loff_t pos, size_t count, | 379 | struct inode *inode, loff_t pos, size_t count, int opcode); |
| 343 | int isdir); | ||
| 344 | 380 | ||
| 345 | /** | 381 | /** |
| 346 | * Send OPEN or OPENDIR request | 382 | * Send OPEN or OPENDIR request |
| @@ -395,12 +431,6 @@ void fuse_init_symlink(struct inode *inode); | |||
| 395 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr); | 431 | void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr); |
| 396 | 432 | ||
| 397 | /** | 433 | /** |
| 398 | * Check if the connection can be released, and if yes, then free the | ||
| 399 | * connection structure | ||
| 400 | */ | ||
| 401 | void fuse_release_conn(struct fuse_conn *fc); | ||
| 402 | |||
| 403 | /** | ||
| 404 | * Initialize the client device | 434 | * Initialize the client device |
| 405 | */ | 435 | */ |
| 406 | int fuse_dev_init(void); | 436 | int fuse_dev_init(void); |
| @@ -456,6 +486,9 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req); | |||
| 456 | */ | 486 | */ |
| 457 | void fuse_release_background(struct fuse_req *req); | 487 | void fuse_release_background(struct fuse_req *req); |
| 458 | 488 | ||
| 489 | /* Abort all requests */ | ||
| 490 | void fuse_abort_conn(struct fuse_conn *fc); | ||
| 491 | |||
| 459 | /** | 492 | /** |
| 460 | * Get the attributes of a file | 493 | * Get the attributes of a file |
| 461 | */ | 494 | */ |
| @@ -465,8 +498,3 @@ int fuse_do_getattr(struct inode *inode); | |||
| 465 | * Invalidate inode attributes | 498 | * Invalidate inode attributes |
| 466 | */ | 499 | */ |
| 467 | void fuse_invalidate_attr(struct inode *inode); | 500 | void fuse_invalidate_attr(struct inode *inode); |
| 468 | |||
| 469 | /** | ||
| 470 | * Send the INIT message | ||
| 471 | */ | ||
| 472 | void fuse_send_init(struct fuse_conn *fc); | ||
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 04c80cc957a3..c755a0440a66 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
| @@ -24,6 +24,13 @@ MODULE_LICENSE("GPL"); | |||
| 24 | 24 | ||
| 25 | spinlock_t fuse_lock; | 25 | spinlock_t fuse_lock; |
| 26 | static kmem_cache_t *fuse_inode_cachep; | 26 | static kmem_cache_t *fuse_inode_cachep; |
| 27 | static struct subsystem connections_subsys; | ||
| 28 | |||
| 29 | struct fuse_conn_attr { | ||
| 30 | struct attribute attr; | ||
| 31 | ssize_t (*show)(struct fuse_conn *, char *); | ||
| 32 | ssize_t (*store)(struct fuse_conn *, const char *, size_t); | ||
| 33 | }; | ||
| 27 | 34 | ||
| 28 | #define FUSE_SUPER_MAGIC 0x65735546 | 35 | #define FUSE_SUPER_MAGIC 0x65735546 |
| 29 | 36 | ||
| @@ -189,6 +196,11 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid, | |||
| 189 | return inode; | 196 | return inode; |
| 190 | } | 197 | } |
| 191 | 198 | ||
| 199 | static void fuse_umount_begin(struct super_block *sb) | ||
| 200 | { | ||
| 201 | fuse_abort_conn(get_fuse_conn_super(sb)); | ||
| 202 | } | ||
| 203 | |||
| 192 | static void fuse_put_super(struct super_block *sb) | 204 | static void fuse_put_super(struct super_block *sb) |
| 193 | { | 205 | { |
| 194 | struct fuse_conn *fc = get_fuse_conn_super(sb); | 206 | struct fuse_conn *fc = get_fuse_conn_super(sb); |
| @@ -200,14 +212,13 @@ static void fuse_put_super(struct super_block *sb) | |||
| 200 | 212 | ||
| 201 | spin_lock(&fuse_lock); | 213 | spin_lock(&fuse_lock); |
| 202 | fc->mounted = 0; | 214 | fc->mounted = 0; |
| 203 | fc->user_id = 0; | 215 | fc->connected = 0; |
| 204 | fc->group_id = 0; | 216 | spin_unlock(&fuse_lock); |
| 205 | fc->flags = 0; | 217 | up_write(&fc->sbput_sem); |
| 206 | /* Flush all readers on this fs */ | 218 | /* Flush all readers on this fs */ |
| 207 | wake_up_all(&fc->waitq); | 219 | wake_up_all(&fc->waitq); |
| 208 | up_write(&fc->sbput_sem); | 220 | kobject_del(&fc->kobj); |
| 209 | fuse_release_conn(fc); | 221 | kobject_put(&fc->kobj); |
| 210 | spin_unlock(&fuse_lock); | ||
| 211 | } | 222 | } |
| 212 | 223 | ||
| 213 | static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) | 224 | static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) |
| @@ -356,8 +367,10 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt) | |||
| 356 | return 0; | 367 | return 0; |
| 357 | } | 368 | } |
| 358 | 369 | ||
| 359 | static void free_conn(struct fuse_conn *fc) | 370 | static void fuse_conn_release(struct kobject *kobj) |
| 360 | { | 371 | { |
| 372 | struct fuse_conn *fc = get_fuse_conn_kobj(kobj); | ||
| 373 | |||
| 361 | while (!list_empty(&fc->unused_list)) { | 374 | while (!list_empty(&fc->unused_list)) { |
| 362 | struct fuse_req *req; | 375 | struct fuse_req *req; |
| 363 | req = list_entry(fc->unused_list.next, struct fuse_req, list); | 376 | req = list_entry(fc->unused_list.next, struct fuse_req, list); |
| @@ -367,33 +380,28 @@ static void free_conn(struct fuse_conn *fc) | |||
| 367 | kfree(fc); | 380 | kfree(fc); |
| 368 | } | 381 | } |
| 369 | 382 | ||
| 370 | /* Must be called with the fuse lock held */ | ||
| 371 | void fuse_release_conn(struct fuse_conn *fc) | ||
| 372 | { | ||
| 373 | fc->count--; | ||
| 374 | if (!fc->count) | ||
| 375 | free_conn(fc); | ||
| 376 | } | ||
| 377 | |||
| 378 | static struct fuse_conn *new_conn(void) | 383 | static struct fuse_conn *new_conn(void) |
| 379 | { | 384 | { |
| 380 | struct fuse_conn *fc; | 385 | struct fuse_conn *fc; |
| 381 | 386 | ||
| 382 | fc = kmalloc(sizeof(*fc), GFP_KERNEL); | 387 | fc = kzalloc(sizeof(*fc), GFP_KERNEL); |
| 383 | if (fc != NULL) { | 388 | if (fc) { |
| 384 | int i; | 389 | int i; |
| 385 | memset(fc, 0, sizeof(*fc)); | ||
| 386 | init_waitqueue_head(&fc->waitq); | 390 | init_waitqueue_head(&fc->waitq); |
| 387 | INIT_LIST_HEAD(&fc->pending); | 391 | INIT_LIST_HEAD(&fc->pending); |
| 388 | INIT_LIST_HEAD(&fc->processing); | 392 | INIT_LIST_HEAD(&fc->processing); |
| 393 | INIT_LIST_HEAD(&fc->io); | ||
| 389 | INIT_LIST_HEAD(&fc->unused_list); | 394 | INIT_LIST_HEAD(&fc->unused_list); |
| 390 | INIT_LIST_HEAD(&fc->background); | 395 | INIT_LIST_HEAD(&fc->background); |
| 391 | sema_init(&fc->outstanding_sem, 0); | 396 | sema_init(&fc->outstanding_sem, 1); /* One for INIT */ |
| 392 | init_rwsem(&fc->sbput_sem); | 397 | init_rwsem(&fc->sbput_sem); |
| 398 | kobj_set_kset_s(fc, connections_subsys); | ||
| 399 | kobject_init(&fc->kobj); | ||
| 400 | atomic_set(&fc->num_waiting, 0); | ||
| 393 | for (i = 0; i < FUSE_MAX_OUTSTANDING; i++) { | 401 | for (i = 0; i < FUSE_MAX_OUTSTANDING; i++) { |
| 394 | struct fuse_req *req = fuse_request_alloc(); | 402 | struct fuse_req *req = fuse_request_alloc(); |
| 395 | if (!req) { | 403 | if (!req) { |
| 396 | free_conn(fc); | 404 | kobject_put(&fc->kobj); |
| 397 | return NULL; | 405 | return NULL; |
| 398 | } | 406 | } |
| 399 | list_add(&req->list, &fc->unused_list); | 407 | list_add(&req->list, &fc->unused_list); |
| @@ -408,25 +416,32 @@ static struct fuse_conn *new_conn(void) | |||
| 408 | static struct fuse_conn *get_conn(struct file *file, struct super_block *sb) | 416 | static struct fuse_conn *get_conn(struct file *file, struct super_block *sb) |
| 409 | { | 417 | { |
| 410 | struct fuse_conn *fc; | 418 | struct fuse_conn *fc; |
| 419 | int err; | ||
| 411 | 420 | ||
| 421 | err = -EINVAL; | ||
| 412 | if (file->f_op != &fuse_dev_operations) | 422 | if (file->f_op != &fuse_dev_operations) |
| 413 | return ERR_PTR(-EINVAL); | 423 | goto out_err; |
| 424 | |||
| 425 | err = -ENOMEM; | ||
| 414 | fc = new_conn(); | 426 | fc = new_conn(); |
| 415 | if (fc == NULL) | 427 | if (!fc) |
| 416 | return ERR_PTR(-ENOMEM); | 428 | goto out_err; |
| 429 | |||
| 417 | spin_lock(&fuse_lock); | 430 | spin_lock(&fuse_lock); |
| 418 | if (file->private_data) { | 431 | err = -EINVAL; |
| 419 | free_conn(fc); | 432 | if (file->private_data) |
| 420 | fc = ERR_PTR(-EINVAL); | 433 | goto out_unlock; |
| 421 | } else { | 434 | |
| 422 | file->private_data = fc; | 435 | kobject_get(&fc->kobj); |
| 423 | *get_fuse_conn_super_p(sb) = fc; | 436 | file->private_data = fc; |
| 424 | fc->mounted = 1; | ||
| 425 | fc->connected = 1; | ||
| 426 | fc->count = 2; | ||
| 427 | } | ||
| 428 | spin_unlock(&fuse_lock); | 437 | spin_unlock(&fuse_lock); |
| 429 | return fc; | 438 | return fc; |
| 439 | |||
| 440 | out_unlock: | ||
| 441 | spin_unlock(&fuse_lock); | ||
| 442 | kobject_put(&fc->kobj); | ||
| 443 | out_err: | ||
| 444 | return ERR_PTR(err); | ||
| 430 | } | 445 | } |
| 431 | 446 | ||
| 432 | static struct inode *get_root_inode(struct super_block *sb, unsigned mode) | 447 | static struct inode *get_root_inode(struct super_block *sb, unsigned mode) |
| @@ -445,16 +460,74 @@ static struct super_operations fuse_super_operations = { | |||
| 445 | .read_inode = fuse_read_inode, | 460 | .read_inode = fuse_read_inode, |
| 446 | .clear_inode = fuse_clear_inode, | 461 | .clear_inode = fuse_clear_inode, |
| 447 | .put_super = fuse_put_super, | 462 | .put_super = fuse_put_super, |
| 463 | .umount_begin = fuse_umount_begin, | ||
| 448 | .statfs = fuse_statfs, | 464 | .statfs = fuse_statfs, |
| 449 | .show_options = fuse_show_options, | 465 | .show_options = fuse_show_options, |
| 450 | }; | 466 | }; |
| 451 | 467 | ||
| 468 | static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) | ||
| 469 | { | ||
| 470 | int i; | ||
| 471 | struct fuse_init_out *arg = &req->misc.init_out; | ||
| 472 | |||
| 473 | if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION) | ||
| 474 | fc->conn_error = 1; | ||
| 475 | else { | ||
| 476 | fc->minor = arg->minor; | ||
| 477 | fc->max_write = arg->minor < 5 ? 4096 : arg->max_write; | ||
| 478 | } | ||
| 479 | |||
| 480 | /* After INIT reply is received other requests can go | ||
| 481 | out. So do (FUSE_MAX_OUTSTANDING - 1) number of | ||
| 482 | up()s on outstanding_sem. The last up() is done in | ||
| 483 | fuse_putback_request() */ | ||
| 484 | for (i = 1; i < FUSE_MAX_OUTSTANDING; i++) | ||
| 485 | up(&fc->outstanding_sem); | ||
| 486 | |||
| 487 | fuse_put_request(fc, req); | ||
| 488 | } | ||
| 489 | |||
| 490 | static void fuse_send_init(struct fuse_conn *fc) | ||
| 491 | { | ||
| 492 | /* This is called from fuse_read_super() so there's guaranteed | ||
| 493 | to be exactly one request available */ | ||
| 494 | struct fuse_req *req = fuse_get_request(fc); | ||
| 495 | struct fuse_init_in *arg = &req->misc.init_in; | ||
| 496 | |||
| 497 | arg->major = FUSE_KERNEL_VERSION; | ||
| 498 | arg->minor = FUSE_KERNEL_MINOR_VERSION; | ||
| 499 | req->in.h.opcode = FUSE_INIT; | ||
| 500 | req->in.numargs = 1; | ||
| 501 | req->in.args[0].size = sizeof(*arg); | ||
| 502 | req->in.args[0].value = arg; | ||
| 503 | req->out.numargs = 1; | ||
| 504 | /* Variable length arguement used for backward compatibility | ||
| 505 | with interface version < 7.5. Rest of init_out is zeroed | ||
| 506 | by do_get_request(), so a short reply is not a problem */ | ||
| 507 | req->out.argvar = 1; | ||
| 508 | req->out.args[0].size = sizeof(struct fuse_init_out); | ||
| 509 | req->out.args[0].value = &req->misc.init_out; | ||
| 510 | req->end = process_init_reply; | ||
| 511 | request_send_background(fc, req); | ||
| 512 | } | ||
| 513 | |||
| 514 | static unsigned long long conn_id(void) | ||
| 515 | { | ||
| 516 | static unsigned long long ctr = 1; | ||
| 517 | unsigned long long val; | ||
| 518 | spin_lock(&fuse_lock); | ||
| 519 | val = ctr++; | ||
| 520 | spin_unlock(&fuse_lock); | ||
| 521 | return val; | ||
| 522 | } | ||
| 523 | |||
| 452 | static int fuse_fill_super(struct super_block *sb, void *data, int silent) | 524 | static int fuse_fill_super(struct super_block *sb, void *data, int silent) |
| 453 | { | 525 | { |
| 454 | struct fuse_conn *fc; | 526 | struct fuse_conn *fc; |
| 455 | struct inode *root; | 527 | struct inode *root; |
| 456 | struct fuse_mount_data d; | 528 | struct fuse_mount_data d; |
| 457 | struct file *file; | 529 | struct file *file; |
| 530 | struct dentry *root_dentry; | ||
| 458 | int err; | 531 | int err; |
| 459 | 532 | ||
| 460 | if (!parse_fuse_opt((char *) data, &d)) | 533 | if (!parse_fuse_opt((char *) data, &d)) |
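The INIT handshake moves from dev.c into inode.c and becomes an ordinary asynchronous request: new_conn() above initializes outstanding_sem to 1, so the single request fuse_send_init() takes at mount time is guaranteed a slot, and the reply is handled through the generic req->end hook. process_init_reply() now also checks req->out.h.error, records the negotiated minor version and max_write (falling back to 4096 for minors older than 5), flags conn_error on a major-version mismatch, releases the remaining FUSE_MAX_OUTSTANDING - 1 slots of outstanding_sem, and drops the request reference itself, as the end-callback contract requires.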
| @@ -482,23 +555,42 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
| 482 | if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages) | 555 | if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages) |
| 483 | fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE; | 556 | fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE; |
| 484 | 557 | ||
| 558 | /* Used by get_root_inode() */ | ||
| 559 | sb->s_fs_info = fc; | ||
| 560 | |||
| 485 | err = -ENOMEM; | 561 | err = -ENOMEM; |
| 486 | root = get_root_inode(sb, d.rootmode); | 562 | root = get_root_inode(sb, d.rootmode); |
| 487 | if (root == NULL) | 563 | if (!root) |
| 488 | goto err; | 564 | goto err; |
| 489 | 565 | ||
| 490 | sb->s_root = d_alloc_root(root); | 566 | root_dentry = d_alloc_root(root); |
| 491 | if (!sb->s_root) { | 567 | if (!root_dentry) { |
| 492 | iput(root); | 568 | iput(root); |
| 493 | goto err; | 569 | goto err; |
| 494 | } | 570 | } |
| 571 | |||
| 572 | err = kobject_set_name(&fc->kobj, "%llu", conn_id()); | ||
| 573 | if (err) | ||
| 574 | goto err_put_root; | ||
| 575 | |||
| 576 | err = kobject_add(&fc->kobj); | ||
| 577 | if (err) | ||
| 578 | goto err_put_root; | ||
| 579 | |||
| 580 | sb->s_root = root_dentry; | ||
| 581 | spin_lock(&fuse_lock); | ||
| 582 | fc->mounted = 1; | ||
| 583 | fc->connected = 1; | ||
| 584 | spin_unlock(&fuse_lock); | ||
| 585 | |||
| 495 | fuse_send_init(fc); | 586 | fuse_send_init(fc); |
| 587 | |||
| 496 | return 0; | 588 | return 0; |
| 497 | 589 | ||
| 590 | err_put_root: | ||
| 591 | dput(root_dentry); | ||
| 498 | err: | 592 | err: |
| 499 | spin_lock(&fuse_lock); | 593 | kobject_put(&fc->kobj); |
| 500 | fuse_release_conn(fc); | ||
| 501 | spin_unlock(&fuse_lock); | ||
| 502 | return err; | 594 | return err; |
| 503 | } | 595 | } |
| 504 | 596 | ||
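fuse_fill_super() now registers the connection in sysfs before declaring the mount live: the kobject is named with a monotonically increasing number from conn_id(), sb->s_fs_info points directly at the fuse_conn (the old double-pointer helper is gone), and fc->mounted/fc->connected are set, under fuse_lock, only after every step that can fail has succeeded. The error paths unwind with dput() on the root dentry and kobject_put(), so all freeing funnels through fuse_conn_release().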
| @@ -516,6 +608,69 @@ static struct file_system_type fuse_fs_type = { | |||
| 516 | .kill_sb = kill_anon_super, | 608 | .kill_sb = kill_anon_super, |
| 517 | }; | 609 | }; |
| 518 | 610 | ||
| 611 | static ssize_t fuse_conn_waiting_show(struct fuse_conn *fc, char *page) | ||
| 612 | { | ||
| 613 | return sprintf(page, "%i\n", atomic_read(&fc->num_waiting)); | ||
| 614 | } | ||
| 615 | |||
| 616 | static ssize_t fuse_conn_abort_store(struct fuse_conn *fc, const char *page, | ||
| 617 | size_t count) | ||
| 618 | { | ||
| 619 | fuse_abort_conn(fc); | ||
| 620 | return count; | ||
| 621 | } | ||
| 622 | |||
| 623 | static struct fuse_conn_attr fuse_conn_waiting = | ||
| 624 | __ATTR(waiting, 0400, fuse_conn_waiting_show, NULL); | ||
| 625 | static struct fuse_conn_attr fuse_conn_abort = | ||
| 626 | __ATTR(abort, 0600, NULL, fuse_conn_abort_store); | ||
| 627 | |||
| 628 | static struct attribute *fuse_conn_attrs[] = { | ||
| 629 | &fuse_conn_waiting.attr, | ||
| 630 | &fuse_conn_abort.attr, | ||
| 631 | NULL, | ||
| 632 | }; | ||
| 633 | |||
| 634 | static ssize_t fuse_conn_attr_show(struct kobject *kobj, | ||
| 635 | struct attribute *attr, | ||
| 636 | char *page) | ||
| 637 | { | ||
| 638 | struct fuse_conn_attr *fca = | ||
| 639 | container_of(attr, struct fuse_conn_attr, attr); | ||
| 640 | |||
| 641 | if (fca->show) | ||
| 642 | return fca->show(get_fuse_conn_kobj(kobj), page); | ||
| 643 | else | ||
| 644 | return -EACCES; | ||
| 645 | } | ||
| 646 | |||
| 647 | static ssize_t fuse_conn_attr_store(struct kobject *kobj, | ||
| 648 | struct attribute *attr, | ||
| 649 | const char *page, size_t count) | ||
| 650 | { | ||
| 651 | struct fuse_conn_attr *fca = | ||
| 652 | container_of(attr, struct fuse_conn_attr, attr); | ||
| 653 | |||
| 654 | if (fca->store) | ||
| 655 | return fca->store(get_fuse_conn_kobj(kobj), page, count); | ||
| 656 | else | ||
| 657 | return -EACCES; | ||
| 658 | } | ||
| 659 | |||
| 660 | static struct sysfs_ops fuse_conn_sysfs_ops = { | ||
| 661 | .show = &fuse_conn_attr_show, | ||
| 662 | .store = &fuse_conn_attr_store, | ||
| 663 | }; | ||
| 664 | |||
| 665 | static struct kobj_type ktype_fuse_conn = { | ||
| 666 | .release = fuse_conn_release, | ||
| 667 | .sysfs_ops = &fuse_conn_sysfs_ops, | ||
| 668 | .default_attrs = fuse_conn_attrs, | ||
| 669 | }; | ||
| 670 | |||
| 671 | static decl_subsys(fuse, NULL, NULL); | ||
| 672 | static decl_subsys(connections, &ktype_fuse_conn, NULL); | ||
| 673 | |||
| 519 | static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, | 674 | static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, |
| 520 | unsigned long flags) | 675 | unsigned long flags) |
| 521 | { | 676 | { |
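Together with the subsystem registration below, each connection gets a sysfs directory, nominally /sys/fs/fuse/connections/<number>/, with two files: 'waiting' reads back fc->num_waiting, and writing anything to 'abort' calls fuse_abort_conn() (the usual idiom being echo 1 > .../abort). Show/store dispatch goes through fuse_conn_attr, so adding another per-connection file only takes a new entry in fuse_conn_attrs[]. A hypothetical example, not part of the patch, exposing the negotiated minor protocol version:

    static ssize_t fuse_conn_minor_show(struct fuse_conn *fc, char *page)
    {
            return sprintf(page, "%u\n", fc->minor);
    }

    /* read-only "minor" file; it would also be listed in fuse_conn_attrs[] */
    static struct fuse_conn_attr fuse_conn_minor =
            __ATTR(minor, 0400, fuse_conn_minor_show, NULL);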
| @@ -553,6 +708,34 @@ static void fuse_fs_cleanup(void) | |||
| 553 | kmem_cache_destroy(fuse_inode_cachep); | 708 | kmem_cache_destroy(fuse_inode_cachep); |
| 554 | } | 709 | } |
| 555 | 710 | ||
| 711 | static int fuse_sysfs_init(void) | ||
| 712 | { | ||
| 713 | int err; | ||
| 714 | |||
| 715 | kset_set_kset_s(&fuse_subsys, fs_subsys); | ||
| 716 | err = subsystem_register(&fuse_subsys); | ||
| 717 | if (err) | ||
| 718 | goto out_err; | ||
| 719 | |||
| 720 | kset_set_kset_s(&connections_subsys, fuse_subsys); | ||
| 721 | err = subsystem_register(&connections_subsys); | ||
| 722 | if (err) | ||
| 723 | goto out_fuse_unregister; | ||
| 724 | |||
| 725 | return 0; | ||
| 726 | |||
| 727 | out_fuse_unregister: | ||
| 728 | subsystem_unregister(&fuse_subsys); | ||
| 729 | out_err: | ||
| 730 | return err; | ||
| 731 | } | ||
| 732 | |||
| 733 | static void fuse_sysfs_cleanup(void) | ||
| 734 | { | ||
| 735 | subsystem_unregister(&connections_subsys); | ||
| 736 | subsystem_unregister(&fuse_subsys); | ||
| 737 | } | ||
| 738 | |||
| 556 | static int __init fuse_init(void) | 739 | static int __init fuse_init(void) |
| 557 | { | 740 | { |
| 558 | int res; | 741 | int res; |
| @@ -569,8 +752,14 @@ static int __init fuse_init(void) | |||
| 569 | if (res) | 752 | if (res) |
| 570 | goto err_fs_cleanup; | 753 | goto err_fs_cleanup; |
| 571 | 754 | ||
| 755 | res = fuse_sysfs_init(); | ||
| 756 | if (res) | ||
| 757 | goto err_dev_cleanup; | ||
| 758 | |||
| 572 | return 0; | 759 | return 0; |
| 573 | 760 | ||
| 761 | err_dev_cleanup: | ||
| 762 | fuse_dev_cleanup(); | ||
| 574 | err_fs_cleanup: | 763 | err_fs_cleanup: |
| 575 | fuse_fs_cleanup(); | 764 | fuse_fs_cleanup(); |
| 576 | err: | 765 | err: |
| @@ -581,6 +770,7 @@ static void __exit fuse_exit(void) | |||
| 581 | { | 770 | { |
| 582 | printk(KERN_DEBUG "fuse exit\n"); | 771 | printk(KERN_DEBUG "fuse exit\n"); |
| 583 | 772 | ||
| 773 | fuse_sysfs_cleanup(); | ||
| 584 | fuse_fs_cleanup(); | 774 | fuse_fs_cleanup(); |
| 585 | fuse_dev_cleanup(); | 775 | fuse_dev_cleanup(); |
| 586 | } | 776 | } |
