Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c  286
1 file changed, 139 insertions, 147 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 23d1f52eb1b8..cc750c68fe70 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -23,13 +23,11 @@ static kmem_cache_t *fuse_req_cachep;
 
 static struct fuse_conn *fuse_get_conn(struct file *file)
 {
-	struct fuse_conn *fc;
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
-	if (fc && !fc->connected)
-		fc = NULL;
-	spin_unlock(&fuse_lock);
-	return fc;
+	/*
+	 * Lockless access is OK, because file->private data is set
+	 * once during mount and is valid until the file is released.
+	 */
+	return file->private_data;
 }
 
 static void fuse_request_init(struct fuse_req *req)
@@ -74,10 +72,8 @@ static void restore_sigs(sigset_t *oldset)
  */
 void fuse_reset_request(struct fuse_req *req)
 {
-	int preallocated = req->preallocated;
 	BUG_ON(atomic_read(&req->count) != 1);
 	fuse_request_init(req);
-	req->preallocated = preallocated;
 }
 
 static void __fuse_get_request(struct fuse_req *req)
@@ -92,80 +88,54 @@ static void __fuse_put_request(struct fuse_req *req)
 	atomic_dec(&req->count);
 }
 
-static struct fuse_req *do_get_request(struct fuse_conn *fc)
+struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 {
 	struct fuse_req *req;
-
-	spin_lock(&fuse_lock);
-	BUG_ON(list_empty(&fc->unused_list));
-	req = list_entry(fc->unused_list.next, struct fuse_req, list);
-	list_del_init(&req->list);
-	spin_unlock(&fuse_lock);
-	fuse_request_init(req);
-	req->preallocated = 1;
-	req->in.h.uid = current->fsuid;
-	req->in.h.gid = current->fsgid;
-	req->in.h.pid = current->pid;
-	return req;
-}
-
-/* This can return NULL, but only in case it's interrupted by a SIGKILL */
-struct fuse_req *fuse_get_request(struct fuse_conn *fc)
-{
-	int intr;
 	sigset_t oldset;
+	int intr;
+	int err;
 
 	atomic_inc(&fc->num_waiting);
 	block_sigs(&oldset);
-	intr = down_interruptible(&fc->outstanding_sem);
+	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
 	restore_sigs(&oldset);
-	if (intr) {
-		atomic_dec(&fc->num_waiting);
-		return NULL;
-	}
-	return do_get_request(fc);
-}
+	err = -EINTR;
+	if (intr)
+		goto out;
 
-/* Must be called with fuse_lock held */
-static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-	if (req->preallocated) {
-		atomic_dec(&fc->num_waiting);
-		list_add(&req->list, &fc->unused_list);
-	} else
-		fuse_request_free(req);
+	req = fuse_request_alloc();
+	err = -ENOMEM;
+	if (!req)
+		goto out;
 
-	/* If we are in debt decrease that first */
-	if (fc->outstanding_debt)
-		fc->outstanding_debt--;
-	else
-		up(&fc->outstanding_sem);
+	req->in.h.uid = current->fsuid;
+	req->in.h.gid = current->fsgid;
+	req->in.h.pid = current->pid;
+	req->waiting = 1;
+	return req;
+
+ out:
+	atomic_dec(&fc->num_waiting);
+	return ERR_PTR(err);
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		spin_lock(&fuse_lock);
-		fuse_putback_request(fc, req);
-		spin_unlock(&fuse_lock);
+		if (req->waiting)
+			atomic_dec(&fc->num_waiting);
+		fuse_request_free(req);
 	}
 }
 
-static void fuse_put_request_locked(struct fuse_conn *fc, struct fuse_req *req)
-{
-	if (atomic_dec_and_test(&req->count))
-		fuse_putback_request(fc, req);
-}
-
-void fuse_release_background(struct fuse_req *req)
+void fuse_remove_background(struct fuse_conn *fc, struct fuse_req *req)
 {
-	iput(req->inode);
-	iput(req->inode2);
-	if (req->file)
-		fput(req->file);
-	spin_lock(&fuse_lock);
-	list_del(&req->bg_entry);
-	spin_unlock(&fuse_lock);
+	list_del_init(&req->bg_entry);
+	if (fc->num_background == FUSE_MAX_BACKGROUND) {
+		fc->blocked = 0;
+		wake_up_all(&fc->blocked_waitq);
+	}
+	fc->num_background--;
 }
 
 /*
@@ -184,28 +154,38 @@ void fuse_release_background(struct fuse_req *req)
  * interrupted and put in the background, it will return with an error
  * and hence never be reset and reused.
  *
- * Called with fuse_lock, unlocks it
+ * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
 	if (!req->background) {
+		spin_unlock(&fc->lock);
 		wake_up(&req->waitq);
-		fuse_put_request_locked(fc, req);
-		spin_unlock(&fuse_lock);
+		fuse_put_request(fc, req);
 	} else {
+		struct inode *inode = req->inode;
+		struct inode *inode2 = req->inode2;
+		struct file *file = req->file;
 		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
 		req->end = NULL;
-		spin_unlock(&fuse_lock);
-		down_read(&fc->sbput_sem);
-		if (fc->mounted)
-			fuse_release_background(req);
-		up_read(&fc->sbput_sem);
+		req->inode = NULL;
+		req->inode2 = NULL;
+		req->file = NULL;
+		if (!list_empty(&req->bg_entry))
+			fuse_remove_background(fc, req);
+		spin_unlock(&fc->lock);
+
 		if (end)
 			end(fc, req);
 		else
 			fuse_put_request(fc, req);
+
+		if (file)
+			fput(file);
+		iput(inode);
+		iput(inode2);
 	}
 }
 
@@ -242,6 +222,9 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->background = 1;
 	list_add(&req->bg_entry, &fc->background);
+	fc->num_background++;
+	if (fc->num_background == FUSE_MAX_BACKGROUND)
+		fc->blocked = 1;
 	if (req->inode)
 		req->inode = igrab(req->inode);
 	if (req->inode2)
@@ -250,16 +233,16 @@ static void background_request(struct fuse_conn *fc, struct fuse_req *req)
 		get_file(req->file);
 }
 
-/* Called with fuse_lock held. Releases, and then reacquires it. */
+/* Called with fc->lock held. Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 {
 	sigset_t oldset;
 
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	block_sigs(&oldset);
 	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
 	restore_sigs(&oldset);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
 
@@ -273,9 +256,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		   locked state, there mustn't be any filesystem
 		   operation (e.g. page fault), since that could lead
 		   to deadlock */
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		wait_event(req->waitq, !req->locked);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
@@ -304,19 +287,14 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 	req->in.h.unique = fc->reqctr;
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-	if (!req->preallocated) {
-		/* If request is not preallocated (either FORGET or
-		   RELEASE), then still decrease outstanding_sem, so
-		   user can't open infinite number of files while not
-		   processing the RELEASE requests.  However for
-		   efficiency do it without blocking, so if down()
-		   would block, just increase the debt instead */
-		if (down_trylock(&fc->outstanding_sem))
-			fc->outstanding_debt++;
-	}
 	list_add_tail(&req->list, &fc->pending);
 	req->state = FUSE_REQ_PENDING;
+	if (!req->waiting) {
+		req->waiting = 1;
+		atomic_inc(&fc->num_waiting);
+	}
 	wake_up(&fc->waitq);
+	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
 /*
@@ -325,7 +303,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
 void request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
 	else if (fc->conn_error)
@@ -338,15 +316,16 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 
 		request_wait_answer(fc, req);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
+	background_request(fc, req);
 	if (fc->connected) {
 		queue_request(fc, req);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	} else {
 		req->out.h.error = -ENOTCONN;
 		request_end(fc, req);
@@ -362,9 +341,6 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
 	req->isreply = 1;
-	spin_lock(&fuse_lock);
-	background_request(fc, req);
-	spin_unlock(&fuse_lock);
 	request_send_nowait(fc, req);
 }
 
@@ -373,16 +349,16 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
  * anything that could cause a page-fault.  If the request was already
  * interrupted bail out.
  */
-static int lock_request(struct fuse_req *req)
+static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		if (req->interrupted)
 			err = -ENOENT;
 		else
 			req->locked = 1;
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return err;
 }
@@ -392,18 +368,19 @@ static int lock_request(struct fuse_req *req)
  * requester thread is currently waiting for it to be unlocked, so
  * wake it up.
  */
-static void unlock_request(struct fuse_req *req)
+static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (req) {
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		req->locked = 0;
 		if (req->interrupted)
 			wake_up(&req->waitq);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 }
 
 struct fuse_copy_state {
+	struct fuse_conn *fc;
 	int write;
 	struct fuse_req *req;
 	const struct iovec *iov;
@@ -416,11 +393,12 @@ struct fuse_copy_state {
 	unsigned len;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs, int write,
-			   struct fuse_req *req, const struct iovec *iov,
-			   unsigned long nr_segs)
+static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
+			   int write, struct fuse_req *req,
+			   const struct iovec *iov, unsigned long nr_segs)
 {
 	memset(cs, 0, sizeof(*cs));
+	cs->fc = fc;
 	cs->write = write;
 	cs->req = req;
 	cs->iov = iov;
@@ -450,7 +428,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	unsigned long offset;
 	int err;
 
-	unlock_request(cs->req);
+	unlock_request(cs->fc, cs->req);
 	fuse_copy_finish(cs);
 	if (!cs->seglen) {
 		BUG_ON(!cs->nr_segs);
@@ -473,7 +451,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	cs->seglen -= cs->len;
 	cs->addr += cs->len;
 
-	return lock_request(cs->req);
+	return lock_request(cs->fc, cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -585,9 +563,9 @@ static void request_wait(struct fuse_conn *fc)
 		if (signal_pending(current))
 			break;
 
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		schedule();
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&fc->waitq, &wait);
@@ -606,18 +584,21 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 			      unsigned long nr_segs, loff_t *off)
 {
 	int err;
-	struct fuse_conn *fc;
 	struct fuse_req *req;
 	struct fuse_in *in;
 	struct fuse_copy_state cs;
 	unsigned reqsize;
+	struct fuse_conn *fc = fuse_get_conn(file);
+	if (!fc)
+		return -EPERM;
 
  restart:
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
-	err = -EPERM;
-	if (!fc)
+	spin_lock(&fc->lock);
+	err = -EAGAIN;
+	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
+	    list_empty(&fc->pending))
 		goto err_unlock;
+
 	request_wait(fc);
 	err = -ENODEV;
 	if (!fc->connected)
@@ -641,14 +622,14 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 		request_end(fc, req);
 		goto restart;
 	}
-	spin_unlock(&fuse_lock);
-	fuse_copy_init(&cs, 1, req, iov, nr_segs);
+	spin_unlock(&fc->lock);
+	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
 	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
 	if (!err)
 		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(&cs);
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err && req->interrupted)
 		err = -ENOENT;
@@ -663,12 +644,12 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
 	else {
 		req->state = FUSE_REQ_SENT;
 		list_move_tail(&req->list, &fc->processing);
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 	}
 	return reqsize;
 
  err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 	return err;
 }
 
@@ -735,9 +716,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	struct fuse_copy_state cs;
 	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
-		return -ENODEV;
+		return -EPERM;
 
-	fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
+	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
 	if (nbytes < sizeof(struct fuse_out_header))
 		return -EINVAL;
 
@@ -749,7 +730,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	    oh.len != nbytes)
 		goto err_finish;
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	err = -ENOENT;
 	if (!fc->connected)
 		goto err_unlock;
@@ -760,9 +741,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 		goto err_unlock;
 
 	if (req->interrupted) {
-		spin_unlock(&fuse_lock);
+		spin_unlock(&fc->lock);
 		fuse_copy_finish(&cs);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 		request_end(fc, req);
 		return -ENOENT;
 	}
@@ -770,12 +751,12 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	req->out.h = oh;
 	req->locked = 1;
 	cs.req = req;
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 
 	err = copy_out_args(&cs, &req->out, nbytes);
 	fuse_copy_finish(&cs);
 
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	req->locked = 0;
 	if (!err) {
 		if (req->interrupted)
@@ -787,7 +768,7 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
 	return err ? err : nbytes;
 
  err_unlock:
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
  err_finish:
 	fuse_copy_finish(&cs);
 	return err;
@@ -804,18 +785,19 @@ static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
 
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
-	struct fuse_conn *fc = fuse_get_conn(file);
 	unsigned mask = POLLOUT | POLLWRNORM;
-
+	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
-		return -ENODEV;
+		return POLLERR;
 
 	poll_wait(file, &fc->waitq, wait);
 
-	spin_lock(&fuse_lock);
-	if (!list_empty(&fc->pending))
-		mask |= POLLIN | POLLRDNORM;
-	spin_unlock(&fuse_lock);
+	spin_lock(&fc->lock);
+	if (!fc->connected)
+		mask = POLLERR;
+	else if (!list_empty(&fc->pending))
+		mask |= POLLIN | POLLRDNORM;
+	spin_unlock(&fc->lock);
 
 	return mask;
 }
@@ -823,7 +805,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 /*
  * Abort all requests on the given list (pending or processing)
  *
- * This function releases and reacquires fuse_lock
+ * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
 {
@@ -832,7 +814,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
 		req = list_entry(head->next, struct fuse_req, list);
 		req->out.h.error = -ECONNABORTED;
 		request_end(fc, req);
-		spin_lock(&fuse_lock);
+		spin_lock(&fc->lock);
 	}
 }
 
@@ -863,10 +845,10 @@ static void end_io_requests(struct fuse_conn *fc)
 			req->end = NULL;
 			/* The end function will consume this reference */
 			__fuse_get_request(req);
-			spin_unlock(&fuse_lock);
+			spin_unlock(&fc->lock);
 			wait_event(req->waitq, !req->locked);
 			end(fc, req);
-			spin_lock(&fuse_lock);
+			spin_lock(&fc->lock);
 		}
 	}
 }
@@ -893,35 +875,44 @@ static void end_io_requests(struct fuse_conn *fc)
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
-	spin_lock(&fuse_lock);
+	spin_lock(&fc->lock);
 	if (fc->connected) {
 		fc->connected = 0;
 		end_io_requests(fc);
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
 		wake_up_all(&fc->waitq);
+		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	}
-	spin_unlock(&fuse_lock);
+	spin_unlock(&fc->lock);
 }
 
 static int fuse_dev_release(struct inode *inode, struct file *file)
 {
-	struct fuse_conn *fc;
-
-	spin_lock(&fuse_lock);
-	fc = file->private_data;
+	struct fuse_conn *fc = fuse_get_conn(file);
 	if (fc) {
+		spin_lock(&fc->lock);
 		fc->connected = 0;
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
-	}
-	spin_unlock(&fuse_lock);
-	if (fc)
+		spin_unlock(&fc->lock);
+		fasync_helper(-1, file, 0, &fc->fasync);
 		kobject_put(&fc->kobj);
+	}
 
 	return 0;
 }
 
+static int fuse_dev_fasync(int fd, struct file *file, int on)
+{
+	struct fuse_conn *fc = fuse_get_conn(file);
+	if (!fc)
+		return -EPERM;
+
+	/* No locking - fasync_helper does its own locking */
+	return fasync_helper(fd, file, on, &fc->fasync);
+}
+
 const struct file_operations fuse_dev_operations = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
@@ -931,6 +922,7 @@ const struct file_operations fuse_dev_operations = {
 	.writev		= fuse_dev_writev,
 	.poll		= fuse_dev_poll,
 	.release	= fuse_dev_release,
+	.fasync		= fuse_dev_fasync,
 };
 
 static struct miscdevice fuse_miscdevice = {
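
Illustrative sketch (not part of the commit above): with -EAGAIN returned by fuse_dev_readv() under O_NONBLOCK, POLLERR reporting in fuse_dev_poll() and the new fuse_dev_fasync() hook, a userspace FUSE daemon can wait for requests with poll() instead of blocking in read(). The buffer size, loop structure and the dispatch_request() handler below are assumptions for illustration only.

/*
 * Hypothetical userspace event loop for /dev/fuse (illustration only):
 * poll the device for readability instead of blocking in read().
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

#define FUSE_BUF_SIZE (128 * 1024)	/* assumed request buffer size */

int fuse_event_loop(int fuse_fd)
{
	char buf[FUSE_BUF_SIZE];
	struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };

	/* O_NONBLOCK makes read() fail with EAGAIN when no request is pending */
	fcntl(fuse_fd, F_SETFL, fcntl(fuse_fd, F_GETFL) | O_NONBLOCK);

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			return -1;
		if (pfd.revents & POLLERR)	/* connection aborted or released */
			return 0;
		if (pfd.revents & POLLIN) {
			ssize_t n = read(fuse_fd, buf, sizeof(buf));
			if (n < 0 && errno == EAGAIN)
				continue;	/* raced with another reader */
			if (n < 0)
				return -1;
			/* dispatch_request(buf, n);  -- hypothetical handler */
		}
	}
}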