Diffstat (limited to 'fs/fuse/dev.c')

 -rw-r--r--  fs/fuse/dev.c | 122
 1 file changed, 62 insertions(+), 60 deletions(-)
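The patch below converts fs/fuse/dev.c from the single global fuse_lock spinlock to a per-connection lock, fc->lock, and threads the struct fuse_conn pointer through the helpers that previously relied on the global lock (fuse_release_background, lock_request, unlock_request, fuse_copy_init). As a rough orientation aid, here is a minimal sketch of the before/after locking pattern; the connection layout shown is an assumption for illustration (the real spinlock member would be added to struct fuse_conn in fuse_i.h, which is outside this diff), and the *_sketch names are hypothetical, not functions from the patch.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Before: one lock shared by every mounted FUSE filesystem. */
static DEFINE_SPINLOCK(fuse_lock);

/* After: each connection carries its own lock (assumed layout; the real
 * member lives in struct fuse_conn in fuse_i.h, not shown in this diff). */
struct fuse_conn_sketch {
	spinlock_t lock;		/* protects this connection's request queues */
	struct list_head pending;	/* requests not yet read by the userspace daemon */
	struct list_head processing;	/* requests waiting for a reply */
};

/* Hypothetical helper showing the call-site change: the caller now passes
 * the connection so the helper can take the per-connection lock. */
static void queue_request_sketch(struct fuse_conn_sketch *fc,
				 struct list_head *req_entry)
{
	spin_lock(&fc->lock);		/* was: spin_lock(&fuse_lock); */
	list_add_tail(req_entry, &fc->pending);
	spin_unlock(&fc->lock);		/* was: spin_unlock(&fuse_lock); */
}

With the global lock, request queue traffic on one mount contended with every other mounted FUSE filesystem; with fc->lock, contention stays per mount.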
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c510533c6849..63d2cf43b5e3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -94,11 +94,11 @@ static struct fuse_req *do_get_request(struct fuse_conn *fc)
 {
 	struct fuse_req *req;
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	BUG_ON(list_empty(&fc->unused_list));
 	req = list_entry(fc->unused_list.next, struct fuse_req, list);
 	list_del_init(&req->list);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	fuse_request_init(req);
 	req->preallocated = 1;
 	req->in.h.uid = current->fsuid;
@@ -124,7 +124,7 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
 	return do_get_request(fc);
 }
 
-/* Must be called with fuse_lock held */
+/* Must be called with fc->lock held */
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (req->preallocated) {
@@ -143,9 +143,9 @@ static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		fuse_putback_request(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 }
 
@@ -155,15 +155,15 @@ static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
 	fuse_putback_request(fc, req);
 }
 
-void fuse_release_background(struct fuse_req *req)
+void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	iput(req->inode);
 	iput(req->inode2);
 	if (req->file)
 		fput(req->file);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	list_del(&req->bg_entry);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 /*
@@ -182,7 +182,7 @@ void fuse_release_background(struct fuse_req *req)
  * interrupted and put in the background, it will return with an error
  * and hence never be reset and reused.
  *
- * Called with fuse_lock, unlocks it
+ * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
@@ -191,14 +191,14 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	if (!req->background) {
 		wake_up(&req->waitq);
 		fuse_put_request_locked(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	} else {
 		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 		req->end = NULL;
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		down_read(&fc->sbput_sem);
 		if (fc->mounted)
-			fuse_release_background(req);
+			fuse_release_background(fc, req);
 		up_read(&fc->sbput_sem);
 		if (end)
 			end(fc, req);
@@ -248,16 +248,16 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 		get_file(req->file);
 }
 
-/* Called with fuse_lock held. Releases, and then reacquires it. */
+/* Called with fc->lock held. Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
 	sigset_t oldset;
 
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	block_sigs(&oldset);
 	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
@@ -271,9 +271,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		   locked state, there mustn't be any filesystem
 		   operation (e.g. page fault), since that could lead
 		   to deadlock */
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		wait_event(req->waitq, !req->locked);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
@@ -324,7 +324,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
 	else if (fc->conn_error)
@@ -337,15 +337,15 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 
 		request_wait_answer(fc, req);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (fc->connected) {
 		queue_request(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	} else {
 		req->out.h.error = -ENOTCONN;
 		request_end(fc, req);
@@ -361,9 +361,9 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	background_request(fc, req);
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	request_send_nowait(fc, req);
 }
 
@@ -372,16 +372,16 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
  * anything that could cause a page-fault. If the request was already
  * interrupted bail out.
  */
-static int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		if (req->interrupted)
 			err = -ENOENT;
 		else
 			req->locked = 1;
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return err;
 }
@@ -391,18 +391,19 @@ static int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		req->locked = 0;
 		if (req->interrupted)
 			wake_up(&req->waitq);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 }
 
 struct fuse_copy_state {
+	struct fuse_conn *fc;
 	int write;
 	struct fuse_req *req;
 	const struct iovec *iov;
@@ -415,11 +416,12 @@ struct fuse_copy_state {
 	unsigned len;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
-			   struct fuse_req *req, const struct iovec *iov,
-			   unsigned long nr_segs)
+static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+			   int write, struct fuse_req *req,
+			   const struct iovec *iov, unsigned long nr_segs)
 {
 	memset(cs, 0, sizeof(*cs));
+	cs->fc = fc;
 	cs->write = write;
 	cs->req = req;
 	cs->iov = iov;
@@ -449,7 +451,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	unsigned long offset;
 	int err;
 
-	unlock_request(cs->req);
+	unlock_request(cs->fc, cs->req);
 	fuse_copy_finish(cs);
 	if (!cs->seglen) {
 		BUG_ON(!cs->nr_segs);
@@ -472,7 +474,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	cs->seglen -= cs->len;
 	cs->addr += cs->len;
 
-	return lock_request(cs->req);
+	return lock_request(cs->fc, cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -584,9 +586,9 @@ static void request_wait(struct fuse_conn *fc)
 		if (signal_pending(current))
 			break;
 
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		schedule();
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&fc->waitq, &wait);
@@ -614,7 +616,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		return -EPERM;
 
  restart:
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	err = -EAGAIN;
 	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
 	    list_empty(&fc->pending))
@@ -643,14 +645,14 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		request_end(fc, req);
 		goto restart;
 	}
-	spin_unlock(&fuse_lock);
-	fuse_copy_init(&cs, 1, req, iov, nr_segs);
+	spin_unlock(&fc->lock);
+	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
 	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
 	if (!err)
 		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(&cs);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err && req->interrupted)
 		err = -ENOENT;
@@ -665,12 +667,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	else {
 		req->state = FUSE_REQ_SENT;
 		list_move_tail(&req->list, &fc->processing);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return reqsize;
 
 err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	return err;
 }
 
@@ -739,7 +741,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	if (!fc)
 		return -ENODEV;
 
-	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
+	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
 	if (nbytes < sizeof(struct fuse_out_header))
 		return -EINVAL;
 
@@ -751,7 +753,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	    oh.len != nbytes)
 		goto err_finish;
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	err = -ENOENT;
 	if (!fc->connected)
 		goto err_unlock;
@@ -762,9 +764,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 
 	if (req->interrupted) {
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		fuse_copy_finish(&cs);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		request_end(fc, req);
 		return -ENOENT;
 	}
@@ -772,12 +774,12 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	req->out.h = oh;
 	req->locked = 1;
 	cs.req = req;
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 
 	err = copy_out_args(&cs, &req->out, nbytes);
 	fuse_copy_finish(&cs);
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err) {
 		if (req->interrupted)
@@ -789,7 +791,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	return err ? err : nbytes;
 
  err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
  err_finish:
 	fuse_copy_finish(&cs);
 	return err;
@@ -813,12 +815,12 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 
 	poll_wait(file, &fc->waitq, wait);
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (!fc->connected)
 		mask = POLLERR;
 	else if (!list_empty(&fc->pending))
 		mask |= POLLIN | POLLRDNORM;
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 
 	return mask;
 }
@@ -826,7 +828,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 /*
  * Abort all requests on the given list (pending or processing)
  *
- * This function releases and reacquires fuse_lock
+ * This function releases and reacquires fc->lock
 */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
@@ -835,7 +837,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 		req = list_entry(head->next, struct fuse_req, list);
 		req->out.h.error = -ECONNABORTED;
 		request_end(fc, req);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 }
 
@@ -866,10 +868,10 @@ static void end_io_requests(struct fuse_conn *fc)
 			req->end = NULL;
 			/* The end function will consume this reference */
 			__fuse_get_request(req);
-			spin_unlock(&fuse_lock);
+			spin_unlock(&fc->lock);
 			wait_event(req->waitq, !req->locked);
 			end(fc, req);
-			spin_lock(&fuse_lock);
+			spin_lock(&fc->lock);
 		}
 	}
 }
@@ -896,7 +898,7 @@ static void end_io_requests(struct fuse_conn *fc)
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (fc->connected) {
 		fc->connected = 0;
 		end_io_requests(fc);
@@ -905,18 +907,18 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		wake_up_all(&fc->waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
 	struct fuse_conn *fc = fuse_get_conn(file);
 	if (fc) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		fasync_helper(-1, file, 0, &fc->fasync);
 		kobject_put(&fc->kobj);
 	}