Diffstat (limited to 'fs/fuse/dev.c')

 fs/fuse/dev.c | 237
 1 file changed, 148 insertions(+), 89 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8f873e621f41..4526da8907c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -21,18 +21,18 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 
 static kmem_cache_t *fuse_req_cachep;
 
-static inline struct fuse_conn *fuse_get_conn(struct file *file)
+static struct fuse_conn *fuse_get_conn(struct file *file)
 {
 	struct fuse_conn *fc;
 	spin_lock(&fuse_lock);
 	fc = file->private_data;
-	if (fc && !fc->mounted)
+	if (fc && !fc->connected)
 		fc = NULL;
 	spin_unlock(&fuse_lock);
 	return fc;
 }
 
-static inline void fuse_request_init(struct fuse_req *req)
+static void fuse_request_init(struct fuse_req *req)
 {
 	memset(req, 0, sizeof(*req));
 	INIT_LIST_HEAD(&req->list);
@@ -53,7 +53,7 @@ void fuse_request_free(struct fuse_req *req)
 	kmem_cache_free(fuse_req_cachep, req);
 }
 
-static inline void block_sigs(sigset_t *oldset)
+static void block_sigs(sigset_t *oldset)
 {
 	sigset_t mask;
 
@@ -61,7 +61,7 @@ static inline void block_sigs(sigset_t *oldset)
 	sigprocmask(SIG_BLOCK, &mask, oldset);
 }
 
-static inline void restore_sigs(sigset_t *oldset)
+static void restore_sigs(sigset_t *oldset)
 {
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
@@ -109,18 +109,24 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
 	int intr;
 	sigset_t oldset;
 
+	atomic_inc(&fc->num_waiting);
 	block_sigs(&oldset);
 	intr = down_interruptible(&fc->outstanding_sem);
 	restore_sigs(&oldset);
-	return intr ? NULL : do_get_request(fc);
+	if (intr) {
+		atomic_dec(&fc->num_waiting);
+		return NULL;
+	}
+	return do_get_request(fc);
 }
 
 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	spin_lock(&fuse_lock);
-	if (req->preallocated)
+	if (req->preallocated) {
+		atomic_dec(&fc->num_waiting);
 		list_add(&req->list, &fc->unused_list);
-	else
+	} else
 		fuse_request_free(req);
 
 	/* If we are in debt decrease that first */
@@ -151,19 +157,20 @@ void fuse_release_background(struct fuse_req *req)
 /*
  * This function is called when a request is finished.  Either a reply
  * has arrived or it was interrupted (and not yet sent) or some error
- * occurred during communication with userspace, or the device file was
- * closed.  It decreases the reference count for the request.  In case
- * of a background request the reference to the stored objects are
- * released.  The requester thread is woken up (if still waiting), and
- * finally the request is either freed or put on the unused_list
+ * occurred during communication with userspace, or the device file
+ * was closed.  In case of a background request the reference to the
+ * stored objects are released.  The requester thread is woken up (if
+ * still waiting), the 'end' callback is called if given, else the
+ * reference to the request is released
  *
  * Called with fuse_lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
-	int putback;
-	req->finished = 1;
-	putback = atomic_dec_and_test(&req->count);
+	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+	req->end = NULL;
+	list_del(&req->list);
+	req->state = FUSE_REQ_FINISHED;
 	spin_unlock(&fuse_lock);
 	if (req->background) {
 		down_read(&fc->sbput_sem);
@@ -172,28 +179,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 		up_read(&fc->sbput_sem);
 	}
 	wake_up(&req->waitq);
-	if (req->in.h.opcode == FUSE_INIT) {
-		int i;
-
-		if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
-			fc->conn_error = 1;
-
-		/* After INIT reply is received other requests can go
-		   out.  So do (FUSE_MAX_OUTSTANDING - 1) number of
-		   up()s on outstanding_sem.  The last up() is done in
-		   fuse_putback_request() */
-		for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
-			up(&fc->outstanding_sem);
-	} else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
-		/* Special case for failed iget in CREATE */
-		u64 nodeid = req->in.h.nodeid;
-		__fuse_get_request(req);
-		fuse_reset_request(req);
-		fuse_send_forget(fc, req, nodeid, 1);
-		putback = 0;
-	}
-	if (putback)
-		fuse_putback_request(fc, req);
+	if (end)
+		end(fc, req);
+	else
+		fuse_put_request(fc, req);
 }
 
 /*
@@ -244,14 +233,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 
 	spin_unlock(&fuse_lock);
 	block_sigs(&oldset);
-	wait_event_interruptible(req->waitq, req->finished);
+	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
 	spin_lock(&fuse_lock);
-	if (req->finished)
+	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
-	req->out.h.error = -EINTR;
-	req->interrupted = 1;
+	if (!req->interrupted) {
+		req->out.h.error = -EINTR;
+		req->interrupted = 1;
+	}
 	if (req->locked) {
 		/* This is uninterruptible sleep, because data is
 		   being copied to/from the buffers of req.  During
@@ -262,10 +253,10 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		wait_event(req->waitq, !req->locked);
 		spin_lock(&fuse_lock);
 	}
-	if (!req->sent && !list_empty(&req->list)) {
+	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
 		__fuse_put_request(req);
-	} else if (!req->finished && req->sent)
+	} else if (req->state == FUSE_REQ_SENT)
 		background_request(fc, req);
 }
 
@@ -300,6 +291,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 		fc->outstanding_debt++;
 	}
 	list_add_tail(&req->list, &fc->pending);
+	req->state = FUSE_REQ_PENDING;
 	wake_up(&fc->waitq);
 }
 
@@ -352,30 +344,12 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 	request_send_nowait(fc, req);
 }
 
-void fuse_send_init(struct fuse_conn *fc)
-{
-	/* This is called from fuse_read_super() so there's guaranteed
-	   to be a request available */
-	struct fuse_req *req = do_get_request(fc);
-	struct fuse_init_in_out *arg = &req->misc.init_in_out;
-	arg->major = FUSE_KERNEL_VERSION;
-	arg->minor = FUSE_KERNEL_MINOR_VERSION;
-	req->in.h.opcode = FUSE_INIT;
-	req->in.numargs = 1;
-	req->in.args[0].size = sizeof(*arg);
-	req->in.args[0].value = arg;
-	req->out.numargs = 1;
-	req->out.args[0].size = sizeof(*arg);
-	req->out.args[0].value = arg;
-	request_send_background(fc, req);
-}
-
 /*
  * Lock the request.  Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault.  If the request was already
  * interrupted bail out.
  */
-static inline int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
@@ -394,7 +368,7 @@ static inline int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static inline void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_req *req)
 {
 	if (req) {
 		spin_lock(&fuse_lock);
@@ -430,7 +404,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write,
 }
 
 /* Unmap and put previous page of userspace buffer */
-static inline void fuse_copy_finish(struct fuse_copy_state *cs)
+static void fuse_copy_finish(struct fuse_copy_state *cs)
 {
 	if (cs->mapaddr) {
 		kunmap_atomic(cs->mapaddr, KM_USER0);
@@ -479,8 +453,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 }
 
 /* Do as much copy to/from userspace buffer as we can */
-static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
-			       unsigned *size)
+static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
 	unsigned ncpy = min(*size, cs->len);
 	if (val) {
@@ -500,8 +473,8 @@ static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
  * Copy a page in the request to/from the userspace buffer.  Must be
  * done atomically
  */
-static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
-				 unsigned offset, unsigned count, int zeroing)
+static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
+			  unsigned offset, unsigned count, int zeroing)
 {
 	if (page && zeroing && count < PAGE_SIZE) {
 		void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -583,7 +556,7 @@ static void request_wait(struct fuse_conn *fc)
 	DECLARE_WAITQUEUE(wait, current);
 
 	add_wait_queue_exclusive(&fc->waitq, &wait);
-	while (fc->mounted && list_empty(&fc->pending)) {
+	while (fc->connected && list_empty(&fc->pending)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signal_pending(current))
 			break;
@@ -615,6 +588,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	struct fuse_copy_state cs;
 	unsigned reqsize;
 
+ restart:
 	spin_lock(&fuse_lock);
 	fc = file->private_data;
 	err = -EPERM;
@@ -622,28 +596,34 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 	request_wait(fc);
 	err = -ENODEV;
-	if (!fc->mounted)
+	if (!fc->connected)
 		goto err_unlock;
 	err = -ERESTARTSYS;
 	if (list_empty(&fc->pending))
 		goto err_unlock;
 
 	req = list_entry(fc->pending.next, struct fuse_req, list);
-	list_del_init(&req->list);
-	spin_unlock(&fuse_lock);
+	req->state = FUSE_REQ_READING;
+	list_move(&req->list, &fc->io);
 
 	in = &req->in;
-	reqsize = req->in.h.len;
-	fuse_copy_init(&cs, 1, req, iov, nr_segs);
-	err = -EINVAL;
-	if (iov_length(iov, nr_segs) >= reqsize) {
-		err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
-		if (!err)
-			err = fuse_copy_args(&cs, in->numargs, in->argpages,
-					     (struct fuse_arg *) in->args, 0);
+	reqsize = in->h.len;
+	/* If request is too large, reply with an error and restart the read */
+	if (iov_length(iov, nr_segs) < reqsize) {
+		req->out.h.error = -EIO;
+		/* SETXATTR is special, since it may contain too large data */
+		if (in->h.opcode == FUSE_SETXATTR)
+			req->out.h.error = -E2BIG;
+		request_end(fc, req);
+		goto restart;
 	}
+	spin_unlock(&fuse_lock);
+	fuse_copy_init(&cs, 1, req, iov, nr_segs);
+	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
+	if (!err)
+		err = fuse_copy_args(&cs, in->numargs, in->argpages,
+				     (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(&cs);
-
 	spin_lock(&fuse_lock);
 	req->locked = 0;
 	if (!err && req->interrupted)
@@ -657,8 +637,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	if (!req->isreply)
 		request_end(fc, req);
 	else {
-		req->sent = 1;
-		list_add_tail(&req->list, &fc->processing);
+		req->state = FUSE_REQ_SENT;
+		list_move_tail(&req->list, &fc->processing);
 		spin_unlock(&fuse_lock);
 	}
 	return reqsize;
@@ -746,17 +726,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_finish;
 
 	spin_lock(&fuse_lock);
+	err = -ENOENT;
+	if (!fc->connected)
+		goto err_unlock;
+
 	req = request_find(fc, oh.unique);
 	err = -EINVAL;
 	if (!req)
 		goto err_unlock;
 
-	list_del_init(&req->list);
 	if (req->interrupted) {
-		request_end(fc, req);
+		spin_unlock(&fuse_lock);
 		fuse_copy_finish(&cs);
+		spin_lock(&fuse_lock);
+		request_end(fc, req);
 		return -ENOENT;
 	}
+	list_move(&req->list, &fc->io);
 	req->out.h = oh;
 	req->locked = 1;
 	cs.req = req;
@@ -810,19 +796,90 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	return mask;
 }
 
-/* Abort all requests on the given list (pending or processing) */
+/*
+ * Abort all requests on the given list (pending or processing)
+ *
+ * This function releases and reacquires fuse_lock
+ */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
 	while (!list_empty(head)) {
 		struct fuse_req *req;
 		req = list_entry(head->next, struct fuse_req, list);
-		list_del_init(&req->list);
 		req->out.h.error = -ECONNABORTED;
 		request_end(fc, req);
 		spin_lock(&fuse_lock);
 	}
 }
 
+/*
+ * Abort requests under I/O
+ *
+ * The requests are set to interrupted and finished, and the request
+ * waiter is woken up.  This will make request_wait_answer() wait
+ * until the request is unlocked and then return.
+ *
+ * If the request is asynchronous, then the end function needs to be
+ * called after waiting for the request to be unlocked (if it was
+ * locked).
+ */
+static void end_io_requests(struct fuse_conn *fc)
+{
+	while (!list_empty(&fc->io)) {
+		struct fuse_req *req =
+			list_entry(fc->io.next, struct fuse_req, list);
+		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+
+		req->interrupted = 1;
+		req->out.h.error = -ECONNABORTED;
+		req->state = FUSE_REQ_FINISHED;
+		list_del_init(&req->list);
+		wake_up(&req->waitq);
+		if (end) {
+			req->end = NULL;
+			/* The end function will consume this reference */
+			__fuse_get_request(req);
+			spin_unlock(&fuse_lock);
+			wait_event(req->waitq, !req->locked);
+			end(fc, req);
+			spin_lock(&fuse_lock);
+		}
+	}
+}
+
+/*
+ * Abort all requests.
+ *
+ * Emergency exit in case of a malicious or accidental deadlock, or
+ * just a hung filesystem.
+ *
+ * The same effect is usually achievable through killing the
+ * filesystem daemon and all users of the filesystem.  The exception
+ * is the combination of an asynchronous request and the tricky
+ * deadlock (see Documentation/filesystems/fuse.txt).
+ *
+ * During the aborting, progression of requests from the pending and
+ * processing lists onto the io list, and progression of new requests
+ * onto the pending list is prevented by req->connected being false.
+ *
+ * Progression of requests under I/O to the processing list is
+ * prevented by the req->interrupted flag being true for these
+ * requests.  For this reason requests on the io list must be aborted
+ * first.
+ */
+void fuse_abort_conn(struct fuse_conn *fc)
+{
+	spin_lock(&fuse_lock);
+	if (fc->connected) {
+		fc->connected = 0;
+		end_io_requests(fc);
+		end_requests(fc, &fc->pending);
+		end_requests(fc, &fc->processing);
+		wake_up_all(&fc->waitq);
+	}
+	spin_unlock(&fuse_lock);
+}
+
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
 	struct fuse_conn *fc;
@@ -833,9 +890,11 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-		fuse_release_conn(fc);
 	}
 	spin_unlock(&fuse_lock);
+	if (fc)
+		kobject_put(&fc->kobj);
+
 	return 0;
 }
 
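For orientation, the common thread in this diff is the replacement of the old sent/finished bitfields in struct fuse_req with an explicit request state, plus the new fc->io list for requests caught mid-copy. Below is a minimal user-space sketch of the resulting lifecycle; only the FUSE_REQ_* names mirror the diff (FUSE_REQ_INIT is assumed from the same patch series and does not appear above), and everything else is illustrative rather than kernel code.

/* User-space sketch of the request lifecycle this diff introduces.
 * Only the FUSE_REQ_* names mirror the patch; the helpers here are
 * illustrative and not part of fs/fuse/dev.c. */
#include <stdio.h>

enum fuse_req_state {
	FUSE_REQ_INIT = 0,	/* allocated, not yet queued (assumed name) */
	FUSE_REQ_PENDING,	/* on fc->pending; set in queue_request()   */
	FUSE_REQ_READING,	/* on fc->io; daemon is reading the request */
	FUSE_REQ_SENT,		/* on fc->processing; awaiting the reply    */
	FUSE_REQ_FINISHED	/* replied, interrupted or aborted          */
};

static const char *state_name(enum fuse_req_state s)
{
	switch (s) {
	case FUSE_REQ_INIT:	return "INIT";
	case FUSE_REQ_PENDING:	return "PENDING (fc->pending)";
	case FUSE_REQ_READING:	return "READING (fc->io)";
	case FUSE_REQ_SENT:	return "SENT (fc->processing)";
	case FUSE_REQ_FINISHED:	return "FINISHED";
	}
	return "?";
}

int main(void)
{
	/* Happy path: queue_request() -> fuse_dev_readv() ->
	 * fuse_dev_writev() -> request_end() */
	enum fuse_req_state path[] = {
		FUSE_REQ_INIT, FUSE_REQ_PENDING, FUSE_REQ_READING,
		FUSE_REQ_SENT, FUSE_REQ_FINISHED
	};
	size_t i, n = sizeof(path) / sizeof(path[0]);

	for (i = 0; i < n; i++)
		printf("%s%s", state_name(path[i]), i + 1 < n ? " -> " : "\n");
	return 0;
}

On abort, fuse_abort_conn() short-circuits this path: requests on fc->io are moved straight to FUSE_REQ_FINISHED with -ECONNABORTED before the pending and processing lists are flushed by end_requests().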