author    Linus Torvalds <torvalds@linux-foundation.org>  2015-07-02 14:21:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-07-02 14:21:26 -0400
commit    a7ba4bf5e7ff6bfe83e41c748b77b49297c1b5d9 (patch)
tree      389a5982245c1fcc5236c32772502d7c30d58e34 /fs/fuse
parent    a611fb75d0517fce65f588cde94f80bb4052c6b2 (diff)
parent    0a30f612d6cfd936235b41b090dbe0119c9039d1 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
Pull fuse updates from Miklos Szeredi:
 "This is the start of improving fuse scalability. An input queue and
  a processing queue is split out from the monolithic fuse connection,
  each of those having their own spinlock.

  The end of the patchset adds the ability to clone a fuse connection.
  This means, that instead of having to read/write requests/answers on
  a single fuse device fd, the fuse daemon can have multiple distinct
  file descriptors open. Each of those can be used to receive requests
  and send answers, currently the only constraint is that a request
  must be answered on the same fd as it was read from.

  This can be extended further to allow binding a device clone to a
  specific CPU or NUMA node.

  Based on a patchset by Srinivas Eeda and Ashish Samant. Thanks to
  Ashish for the review of this series"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse: (40 commits)
  fuse: update MAINTAINERS entry
  fuse: separate pqueue for clones
  fuse: introduce per-instance fuse_dev structure
  fuse: device fd clone
  fuse: abort: no fc->lock needed for request ending
  fuse: no fc->lock for pqueue parts
  fuse: no fc->lock in request_end()
  fuse: cleanup request_end()
  fuse: request_end(): do once
  fuse: add req flag for private list
  fuse: pqueue locking
  fuse: abort: group pqueue accesses
  fuse: cleanup fuse_dev_do_read()
  fuse: move list_del_init() from request_end() into callers
  fuse: duplicate ->connected in pqueue
  fuse: separate out processing queue
  fuse: simplify request_wait()
  fuse: no fc->lock for iqueue parts
  fuse: allow interrupt queuing without fc->lock
  fuse: iqueue locking
  ...
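As a rough illustration of what the cloning described above enables (not part
of this merge's text): a multithreaded fuse daemon can open extra /dev/fuse
fds, bind each one to the existing session, and let each worker thread read
requests from and write answers to its own fd. A minimal userspace sketch
follows; FUSE_DEV_IOC_CLONE is introduced by the "fuse: device fd clone"
patch in this series (the fallback define mirrors the uapi header), and
clone_fuse_dev() is a hypothetical helper name.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ioctl.h>	/* _IOR() */

	#ifndef FUSE_DEV_IOC_CLONE
	#define FUSE_DEV_IOC_CLONE	_IOR(229, 0, uint32_t)
	#endif

	/* session_fd: the /dev/fuse fd set up at mount time */
	int clone_fuse_dev(int session_fd)
	{
		uint32_t oldfd = session_fd;
		int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

		if (clone_fd < 0)
			return -1;
		/* bind the new fd to the existing fuse connection */
		if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
			perror("FUSE_DEV_IOC_CLONE");
			close(clone_fd);
			return -1;
		}
		/* requests read on clone_fd must be answered on clone_fd */
		return clone_fd;
	}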
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/cuse.c   |  15
-rw-r--r--  fs/fuse/dev.c    | 825
-rw-r--r--  fs/fuse/file.c   |  20
-rw-r--r--  fs/fuse/fuse_i.h | 167
-rw-r--r--  fs/fuse/inode.c  |  86
5 files changed, 624 insertions, 489 deletions
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index e5bbf748b698..eae2c11268bc 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -489,6 +489,7 @@ static void cuse_fc_release(struct fuse_conn *fc)
  */
 static int cuse_channel_open(struct inode *inode, struct file *file)
 {
+	struct fuse_dev *fud;
 	struct cuse_conn *cc;
 	int rc;
 
@@ -499,17 +500,22 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
 
 	fuse_conn_init(&cc->fc);
 
+	fud = fuse_dev_alloc(&cc->fc);
+	if (!fud) {
+		kfree(cc);
+		return -ENOMEM;
+	}
+
 	INIT_LIST_HEAD(&cc->list);
 	cc->fc.release = cuse_fc_release;
 
-	cc->fc.connected = 1;
 	cc->fc.initialized = 1;
 	rc = cuse_send_init(cc);
 	if (rc) {
-		fuse_conn_put(&cc->fc);
+		fuse_dev_free(fud);
 		return rc;
 	}
-	file->private_data = &cc->fc; /* channel owns base reference to cc */
+	file->private_data = fud;
 
 	return 0;
 }
@@ -527,7 +533,8 @@ static int cuse_channel_open(struct inode *inode, struct file *file)
  */
 static int cuse_channel_release(struct inode *inode, struct file *file)
 {
-	struct cuse_conn *cc = fc_to_cc(file->private_data);
+	struct fuse_dev *fud = file->private_data;
+	struct cuse_conn *cc = fc_to_cc(fud->fc);
 	int rc;
 
 	/* remove from the conntbl, no more access from this point on */
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c8b68ab2e574..80cc1b35d460 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -25,13 +25,13 @@ MODULE_ALIAS("devname:fuse");
 
 static struct kmem_cache *fuse_req_cachep;
 
-static struct fuse_conn *fuse_get_conn(struct file *file)
+static struct fuse_dev *fuse_get_dev(struct file *file)
 {
 	/*
 	 * Lockless access is OK, because file->private data is set
 	 * once during mount and is valid until the file is released.
 	 */
-	return file->private_data;
+	return ACCESS_ONCE(file->private_data);
 }
 
 static void fuse_request_init(struct fuse_req *req, struct page **pages,
@@ -48,6 +48,7 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages,
 	req->pages = pages;
 	req->page_descs = page_descs;
 	req->max_pages = npages;
+	__set_bit(FR_PENDING, &req->flags);
 }
 
 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
@@ -168,6 +169,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 	if (!fc->connected)
 		goto out;
 
+	err = -ECONNREFUSED;
+	if (fc->conn_error)
+		goto out;
+
 	req = fuse_request_alloc(npages);
 	err = -ENOMEM;
 	if (!req) {
@@ -177,8 +182,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 	}
 
 	fuse_req_init_context(req);
-	req->waiting = 1;
-	req->background = for_background;
+	__set_bit(FR_WAITING, &req->flags);
+	if (for_background)
+		__set_bit(FR_BACKGROUND, &req->flags);
+
 	return req;
 
  out:
@@ -268,15 +275,15 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	req = get_reserved_req(fc, file);
 
 	fuse_req_init_context(req);
-	req->waiting = 1;
-	req->background = 0;
+	__set_bit(FR_WAITING, &req->flags);
+	__clear_bit(FR_BACKGROUND, &req->flags);
 	return req;
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
-		if (unlikely(req->background)) {
+		if (test_bit(FR_BACKGROUND, &req->flags)) {
 			/*
 			 * We get here in the unlikely case that a background
 			 * request was allocated but not sent
@@ -287,8 +294,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 			spin_unlock(&fc->lock);
 		}
 
-		if (req->waiting)
+		if (test_bit(FR_WAITING, &req->flags)) {
+			__clear_bit(FR_WAITING, &req->flags);
 			atomic_dec(&fc->num_waiting);
+		}
 
 		if (req->stolen_file)
 			put_reserved_req(fc, req);
@@ -309,46 +318,38 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
 	return nbytes;
 }
 
-static u64 fuse_get_unique(struct fuse_conn *fc)
+static u64 fuse_get_unique(struct fuse_iqueue *fiq)
 {
-	fc->reqctr++;
-	/* zero is special */
-	if (fc->reqctr == 0)
-		fc->reqctr = 1;
-
-	return fc->reqctr;
+	return ++fiq->reqctr;
 }
 
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-	list_add_tail(&req->list, &fc->pending);
-	req->state = FUSE_REQ_PENDING;
-	if (!req->waiting) {
-		req->waiting = 1;
-		atomic_inc(&fc->num_waiting);
-	}
-	wake_up(&fc->waitq);
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	list_add_tail(&req->list, &fiq->pending);
+	wake_up_locked(&fiq->waitq);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 		       u64 nodeid, u64 nlookup)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
+
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;
 
-	spin_lock(&fc->lock);
-	if (fc->connected) {
-		fc->forget_list_tail->next = forget;
-		fc->forget_list_tail = forget;
-		wake_up(&fc->waitq);
-		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_lock(&fiq->waitq.lock);
+	if (fiq->connected) {
+		fiq->forget_list_tail->next = forget;
+		fiq->forget_list_tail = forget;
+		wake_up_locked(&fiq->waitq);
+		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	} else {
 		kfree(forget);
 	}
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -356,12 +357,15 @@ static void flush_bg_queue(struct fuse_conn *fc)
 	while (fc->active_background < fc->max_background &&
 	       !list_empty(&fc->bg_queue)) {
 		struct fuse_req *req;
+		struct fuse_iqueue *fiq = &fc->iq;
 
 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
-		req->in.h.unique = fuse_get_unique(fc);
-		queue_request(fc, req);
+		spin_lock(&fiq->waitq.lock);
+		req->in.h.unique = fuse_get_unique(fiq);
+		queue_request(fiq, req);
+		spin_unlock(&fiq->waitq.lock);
 	}
 }
 
@@ -372,20 +376,22 @@ static void flush_bg_queue(struct fuse_conn *fc)
  * was closed. The requester thread is woken up (if still waiting),
  * the 'end' callback is called if given, else the reference to the
  * request is released
- *
- * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
 {
-	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-	req->end = NULL;
-	list_del(&req->list);
-	list_del(&req->intr_entry);
-	req->state = FUSE_REQ_FINISHED;
-	if (req->background) {
-		req->background = 0;
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	if (test_and_set_bit(FR_FINISHED, &req->flags))
+		return;
 
+	spin_lock(&fiq->waitq.lock);
+	list_del_init(&req->intr_entry);
+	spin_unlock(&fiq->waitq.lock);
+	WARN_ON(test_bit(FR_PENDING, &req->flags));
+	WARN_ON(test_bit(FR_SENT, &req->flags));
+	if (test_bit(FR_BACKGROUND, &req->flags)) {
+		spin_lock(&fc->lock);
+		clear_bit(FR_BACKGROUND, &req->flags);
 		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
 
@@ -401,122 +407,105 @@ __releases(fc->lock)
 		fc->num_background--;
 		fc->active_background--;
 		flush_bg_queue(fc);
+		spin_unlock(&fc->lock);
 	}
-	spin_unlock(&fc->lock);
 	wake_up(&req->waitq);
-	if (end)
-		end(fc, req);
+	if (req->end)
+		req->end(fc, req);
 	fuse_put_request(fc, req);
 }
 
-static void wait_answer_interruptible(struct fuse_conn *fc,
-				      struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-	if (signal_pending(current))
-		return;
-
-	spin_unlock(&fc->lock);
-	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
-	spin_lock(&fc->lock);
-}
-
-static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-	list_add_tail(&req->intr_entry, &fc->interrupts);
-	wake_up(&fc->waitq);
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	spin_lock(&fiq->waitq.lock);
+	if (list_empty(&req->intr_entry)) {
+		list_add_tail(&req->intr_entry, &fiq->interrupts);
+		wake_up_locked(&fiq->waitq);
+	}
+	spin_unlock(&fiq->waitq.lock);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
+	int err;
+
 	if (!fc->no_interrupt) {
 		/* Any signal may interrupt this */
-		wait_answer_interruptible(fc, req);
-
-		if (req->aborted)
-			goto aborted;
-		if (req->state == FUSE_REQ_FINISHED)
+		err = wait_event_interruptible(req->waitq,
+					test_bit(FR_FINISHED, &req->flags));
+		if (!err)
 			return;
 
-		req->interrupted = 1;
-		if (req->state == FUSE_REQ_SENT)
-			queue_interrupt(fc, req);
+		set_bit(FR_INTERRUPTED, &req->flags);
+		/* matches barrier in fuse_dev_do_read() */
+		smp_mb__after_atomic();
+		if (test_bit(FR_SENT, &req->flags))
+			queue_interrupt(fiq, req);
 	}
 
-	if (!req->force) {
+	if (!test_bit(FR_FORCE, &req->flags)) {
 		sigset_t oldset;
 
 		/* Only fatal signals may interrupt this */
 		block_sigs(&oldset);
-		wait_answer_interruptible(fc, req);
+		err = wait_event_interruptible(req->waitq,
+					test_bit(FR_FINISHED, &req->flags));
 		restore_sigs(&oldset);
 
-		if (req->aborted)
-			goto aborted;
-		if (req->state == FUSE_REQ_FINISHED)
+		if (!err)
 			return;
 
+		spin_lock(&fiq->waitq.lock);
 		/* Request is not yet in userspace, bail out */
-		if (req->state == FUSE_REQ_PENDING) {
+		if (test_bit(FR_PENDING, &req->flags)) {
 			list_del(&req->list);
+			spin_unlock(&fiq->waitq.lock);
 			__fuse_put_request(req);
 			req->out.h.error = -EINTR;
 			return;
 		}
+		spin_unlock(&fiq->waitq.lock);
 	}
 
 	/*
 	 * Either request is already in userspace, or it was forced.
 	 * Wait it out.
 	 */
-	spin_unlock(&fc->lock);
-	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
-	spin_lock(&fc->lock);
-
-	if (!req->aborted)
-		return;
-
- aborted:
-	BUG_ON(req->state != FUSE_REQ_FINISHED);
-	if (req->locked) {
-		/* This is uninterruptible sleep, because data is
-		   being copied to/from the buffers of req. During
-		   locked state, there mustn't be any filesystem
-		   operation (e.g. page fault), since that could lead
-		   to deadlock */
-		spin_unlock(&fc->lock);
-		wait_event(req->waitq, !req->locked);
-		spin_lock(&fc->lock);
-	}
+	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-	BUG_ON(req->background);
-	spin_lock(&fc->lock);
-	if (!fc->connected)
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
+	spin_lock(&fiq->waitq.lock);
+	if (!fiq->connected) {
+		spin_unlock(&fiq->waitq.lock);
 		req->out.h.error = -ENOTCONN;
-	else if (fc->conn_error)
-		req->out.h.error = -ECONNREFUSED;
-	else {
-		req->in.h.unique = fuse_get_unique(fc);
-		queue_request(fc, req);
+	} else {
+		req->in.h.unique = fuse_get_unique(fiq);
+		queue_request(fiq, req);
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
 		__fuse_get_request(req);
+		spin_unlock(&fiq->waitq.lock);
 
 		request_wait_answer(fc, req);
+		/* Pairs with smp_wmb() in request_end() */
+		smp_rmb();
 	}
-	spin_unlock(&fc->lock);
 }
 
 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
-	req->isreply = 1;
+	__set_bit(FR_ISREPLY, &req->flags);
+	if (!test_bit(FR_WAITING, &req->flags)) {
+		__set_bit(FR_WAITING, &req->flags);
+		atomic_inc(&fc->num_waiting);
+	}
 	__fuse_request_send(fc, req);
 }
 EXPORT_SYMBOL_GPL(fuse_request_send);
@@ -586,10 +575,20 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
 	return ret;
 }
 
-static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
-					    struct fuse_req *req)
+/*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void fuse_request_send_background_locked(struct fuse_conn *fc,
+					 struct fuse_req *req)
 {
-	BUG_ON(!req->background);
+	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
+	if (!test_bit(FR_WAITING, &req->flags)) {
+		__set_bit(FR_WAITING, &req->flags);
+		atomic_inc(&fc->num_waiting);
+	}
+	__set_bit(FR_ISREPLY, &req->flags);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -602,54 +601,40 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 	flush_bg_queue(fc);
 }
 
-static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
+void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 {
+	BUG_ON(!req->end);
 	spin_lock(&fc->lock);
 	if (fc->connected) {
-		fuse_request_send_nowait_locked(fc, req);
+		fuse_request_send_background_locked(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
+		spin_unlock(&fc->lock);
 		req->out.h.error = -ENOTCONN;
-		request_end(fc, req);
+		req->end(fc, req);
+		fuse_put_request(fc, req);
 	}
 }
-
-void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->isreply = 1;
-	fuse_request_send_nowait(fc, req);
-}
 EXPORT_SYMBOL_GPL(fuse_request_send_background);
 
 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 					  struct fuse_req *req, u64 unique)
 {
 	int err = -ENODEV;
+	struct fuse_iqueue *fiq = &fc->iq;
 
-	req->isreply = 0;
+	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
-	spin_lock(&fc->lock);
-	if (fc->connected) {
-		queue_request(fc, req);
+	spin_lock(&fiq->waitq.lock);
+	if (fiq->connected) {
+		queue_request(fiq, req);
 		err = 0;
 	}
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 
 	return err;
 }
 
-/*
- * Called under fc->lock
- *
- * fc->connected must have been checked previously
- */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-					 struct fuse_req *req)
-{
-	req->isreply = 1;
-	fuse_request_send_nowait_locked(fc, req);
-}
-
 void fuse_force_forget(struct file *file, u64 nodeid)
 {
 	struct inode *inode = file_inode(file);
@@ -665,7 +650,7 @@ void fuse_force_forget(struct file *file, u64 nodeid)
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
-	req->isreply = 0;
+	__clear_bit(FR_ISREPLY, &req->flags);
 	__fuse_request_send(fc, req);
 	/* ignore errors */
 	fuse_put_request(fc, req);
@@ -676,38 +661,39 @@ void fuse_force_forget(struct file *file, u64 nodeid)
  * anything that could cause a page-fault. If the request was already
  * aborted bail out.
  */
-static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
+static int lock_request(struct fuse_req *req)
 {
 	int err = 0;
 	if (req) {
-		spin_lock(&fc->lock);
-		if (req->aborted)
+		spin_lock(&req->waitq.lock);
+		if (test_bit(FR_ABORTED, &req->flags))
 			err = -ENOENT;
 		else
-			req->locked = 1;
-		spin_unlock(&fc->lock);
+			set_bit(FR_LOCKED, &req->flags);
+		spin_unlock(&req->waitq.lock);
 	}
 	return err;
 }
 
 /*
- * Unlock request. If it was aborted during being locked, the
- * requester thread is currently waiting for it to be unlocked, so
- * wake it up.
+ * Unlock request. If it was aborted while locked, caller is responsible
+ * for unlocking and ending the request.
  */
-static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
+static int unlock_request(struct fuse_req *req)
 {
+	int err = 0;
 	if (req) {
-		spin_lock(&fc->lock);
-		req->locked = 0;
-		if (req->aborted)
-			wake_up(&req->waitq);
-		spin_unlock(&fc->lock);
+		spin_lock(&req->waitq.lock);
+		if (test_bit(FR_ABORTED, &req->flags))
+			err = -ENOENT;
+		else
+			clear_bit(FR_LOCKED, &req->flags);
+		spin_unlock(&req->waitq.lock);
 	}
+	return err;
 }
 
 struct fuse_copy_state {
-	struct fuse_conn *fc;
 	int write;
 	struct fuse_req *req;
 	struct iov_iter *iter;
@@ -721,13 +707,10 @@ struct fuse_copy_state {
 	unsigned move_pages:1;
 };
 
-static void fuse_copy_init(struct fuse_copy_state *cs,
-			   struct fuse_conn *fc,
-			   int write,
+static void fuse_copy_init(struct fuse_copy_state *cs, int write,
 			   struct iov_iter *iter)
 {
 	memset(cs, 0, sizeof(*cs));
-	cs->fc = fc;
 	cs->write = write;
 	cs->iter = iter;
 }
@@ -760,7 +743,10 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 	struct page *page;
 	int err;
 
-	unlock_request(cs->fc, cs->req);
+	err = unlock_request(cs->req);
+	if (err)
+		return err;
+
 	fuse_copy_finish(cs);
 	if (cs->pipebufs) {
 		struct pipe_buffer *buf = cs->pipebufs;
@@ -809,7 +795,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 		iov_iter_advance(cs->iter, err);
 	}
 
-	return lock_request(cs->fc, cs->req);
+	return lock_request(cs->req);
 }
 
 /* Do as much copy to/from userspace buffer as we can */
@@ -860,7 +846,10 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 	struct page *newpage;
 	struct pipe_buffer *buf = cs->pipebufs;
 
-	unlock_request(cs->fc, cs->req);
+	err = unlock_request(cs->req);
+	if (err)
+		return err;
+
 	fuse_copy_finish(cs);
 
 	err = buf->ops->confirm(cs->pipe, buf);
@@ -914,12 +903,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 	lru_cache_add_file(newpage);
 
 	err = 0;
-	spin_lock(&cs->fc->lock);
-	if (cs->req->aborted)
+	spin_lock(&cs->req->waitq.lock);
+	if (test_bit(FR_ABORTED, &cs->req->flags))
 		err = -ENOENT;
 	else
 		*pagep = newpage;
-	spin_unlock(&cs->fc->lock);
+	spin_unlock(&cs->req->waitq.lock);
 
 	if (err) {
 		unlock_page(newpage);
@@ -939,7 +928,7 @@ out_fallback:
 	cs->pg = buf->page;
 	cs->offset = buf->offset;
 
-	err = lock_request(cs->fc, cs->req);
+	err = lock_request(cs->req);
 	if (err)
 		return err;
 
@@ -950,11 +939,15 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
 		     unsigned offset, unsigned count)
 {
 	struct pipe_buffer *buf;
+	int err;
 
 	if (cs->nr_segs == cs->pipe->buffers)
 		return -EIO;
 
-	unlock_request(cs->fc, cs->req);
+	err = unlock_request(cs->req);
+	if (err)
+		return err;
+
 	fuse_copy_finish(cs);
 
 	buf = cs->pipebufs;
@@ -1065,36 +1058,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
-static int forget_pending(struct fuse_conn *fc)
+static int forget_pending(struct fuse_iqueue *fiq)
 {
-	return fc->forget_list_head.next != NULL;
+	return fiq->forget_list_head.next != NULL;
 }
 
-static int request_pending(struct fuse_conn *fc)
+static int request_pending(struct fuse_iqueue *fiq)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
-		forget_pending(fc);
-}
-
-/* Wait until a request is available on the pending list */
-static void request_wait(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue_exclusive(&fc->waitq, &wait);
-	while (fc->connected && !request_pending(fc)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (signal_pending(current))
-			break;
-
-		spin_unlock(&fc->lock);
-		schedule();
-		spin_lock(&fc->lock);
-	}
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(&fc->waitq, &wait);
+	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
+		forget_pending(fiq);
 }
 
 /*
@@ -1103,11 +1075,12 @@ __acquires(fc->lock)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fc->lock held, releases it
+ * Called with fiq->waitq.lock held, releases it
  */
-static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_interrupt(struct fuse_iqueue *fiq,
+			       struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
@@ -1115,7 +1088,7 @@ __releases(fc->lock)
 	int err;
 
 	list_del_init(&req->intr_entry);
-	req->intr_unique = fuse_get_unique(fc);
+	req->intr_unique = fuse_get_unique(fiq);
 	memset(&ih, 0, sizeof(ih));
 	memset(&arg, 0, sizeof(arg));
 	ih.len = reqsize;
@@ -1123,7 +1096,7 @@ __releases(fc->lock)
 	ih.unique = req->intr_unique;
 	arg.unique = req->in.h.unique;
 
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 	if (nbytes < reqsize)
 		return -EINVAL;
 
@@ -1135,21 +1108,21 @@ __releases(fc->lock)
 	return err ? err : reqsize;
 }
 
-static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 					       unsigned max,
 					       unsigned *countp)
 {
-	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link *head = fiq->forget_list_head.next;
 	struct fuse_forget_link **newhead = &head;
 	unsigned count;
 
 	for (count = 0; *newhead != NULL && count < max; count++)
 		newhead = &(*newhead)->next;
 
-	fc->forget_list_head.next = *newhead;
+	fiq->forget_list_head.next = *newhead;
 	*newhead = NULL;
-	if (fc->forget_list_head.next == NULL)
-		fc->forget_list_tail = &fc->forget_list_head;
+	if (fiq->forget_list_head.next == NULL)
+		fiq->forget_list_tail = &fiq->forget_list_head;
 
 	if (countp != NULL)
 		*countp = count;
@@ -1157,24 +1130,24 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
 	return head;
 }
 
-static int fuse_read_single_forget(struct fuse_conn *fc,
+static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
 				   size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
 	int err;
-	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
 	struct fuse_forget_in arg = {
 		.nlookup = forget->forget_one.nlookup,
 	};
 	struct fuse_in_header ih = {
 		.opcode = FUSE_FORGET,
 		.nodeid = forget->forget_one.nodeid,
-		.unique = fuse_get_unique(fc),
+		.unique = fuse_get_unique(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 	kfree(forget);
 	if (nbytes < ih.len)
 		return -EINVAL;
@@ -1190,9 +1163,9 @@ __releases(fc->lock)
 	return ih.len;
 }
 
-static int fuse_read_batch_forget(struct fuse_conn *fc,
+static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs, size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
 	int err;
 	unsigned max_forgets;
@@ -1201,18 +1174,18 @@ __releases(fc->lock)
 	struct fuse_batch_forget_in arg = { .count = 0 };
 	struct fuse_in_header ih = {
 		.opcode = FUSE_BATCH_FORGET,
-		.unique = fuse_get_unique(fc),
+		.unique = fuse_get_unique(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
 	if (nbytes < ih.len) {
-		spin_unlock(&fc->lock);
+		spin_unlock(&fiq->waitq.lock);
 		return -EINVAL;
 	}
 
 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
-	head = dequeue_forget(fc, max_forgets, &count);
-	spin_unlock(&fc->lock);
+	head = dequeue_forget(fiq, max_forgets, &count);
+	spin_unlock(&fiq->waitq.lock);
 
 	arg.count = count;
 	ih.len += count * sizeof(struct fuse_forget_one);
@@ -1239,14 +1212,15 @@ __releases(fc->lock)
 	return ih.len;
 }
 
-static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
+			    struct fuse_copy_state *cs,
 			    size_t nbytes)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
-	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
-		return fuse_read_single_forget(fc, cs, nbytes);
+	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
+		return fuse_read_single_forget(fiq, cs, nbytes);
 	else
-		return fuse_read_batch_forget(fc, cs, nbytes);
+		return fuse_read_batch_forget(fiq, cs, nbytes);
 }
 
 /*
@@ -1258,46 +1232,51 @@ __releases(fc->lock)
  * request_end(). Otherwise add it to the processing list, and set
  * the 'sent' flag.
  */
-static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
+static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
 				struct fuse_copy_state *cs, size_t nbytes)
 {
-	int err;
+	ssize_t err;
+	struct fuse_conn *fc = fud->fc;
+	struct fuse_iqueue *fiq = &fc->iq;
+	struct fuse_pqueue *fpq = &fud->pq;
 	struct fuse_req *req;
 	struct fuse_in *in;
 	unsigned reqsize;
 
  restart:
-	spin_lock(&fc->lock);
+	spin_lock(&fiq->waitq.lock);
 	err = -EAGAIN;
-	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
-	    !request_pending(fc))
+	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
+	    !request_pending(fiq))
 		goto err_unlock;
 
-	request_wait(fc);
-	err = -ENODEV;
-	if (!fc->connected)
+	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
+				!fiq->connected || request_pending(fiq));
+	if (err)
 		goto err_unlock;
-	err = -ERESTARTSYS;
-	if (!request_pending(fc))
+
+	err = -ENODEV;
+	if (!fiq->connected)
 		goto err_unlock;
 
-	if (!list_empty(&fc->interrupts)) {
-		req = list_entry(fc->interrupts.next, struct fuse_req,
+	if (!list_empty(&fiq->interrupts)) {
+		req = list_entry(fiq->interrupts.next, struct fuse_req,
 				 intr_entry);
-		return fuse_read_interrupt(fc, cs, nbytes, req);
+		return fuse_read_interrupt(fiq, cs, nbytes, req);
 	}
 
-	if (forget_pending(fc)) {
-		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
-			return fuse_read_forget(fc, cs, nbytes);
+	if (forget_pending(fiq)) {
+		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
+			return fuse_read_forget(fc, fiq, cs, nbytes);
 
-		if (fc->forget_batch <= -8)
-			fc->forget_batch = 16;
+		if (fiq->forget_batch <= -8)
+			fiq->forget_batch = 16;
 	}
 
-	req = list_entry(fc->pending.next, struct fuse_req, list);
-	req->state = FUSE_REQ_READING;
-	list_move(&req->list, &fc->io);
+	req = list_entry(fiq->pending.next, struct fuse_req, list);
+	clear_bit(FR_PENDING, &req->flags);
+	list_del_init(&req->list);
+	spin_unlock(&fiq->waitq.lock);
 
 	in = &req->in;
 	reqsize = in->h.len;
@@ -1310,37 +1289,48 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		request_end(fc, req);
 		goto restart;
 	}
-	spin_unlock(&fc->lock);
+	spin_lock(&fpq->lock);
+	list_add(&req->list, &fpq->io);
+	spin_unlock(&fpq->lock);
 	cs->req = req;
 	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
 	if (!err)
 		err = fuse_copy_args(cs, in->numargs, in->argpages,
 				     (struct fuse_arg *) in->args, 0);
 	fuse_copy_finish(cs);
-	spin_lock(&fc->lock);
-	req->locked = 0;
-	if (req->aborted) {
-		request_end(fc, req);
-		return -ENODEV;
+	spin_lock(&fpq->lock);
+	clear_bit(FR_LOCKED, &req->flags);
+	if (!fpq->connected) {
+		err = -ENODEV;
+		goto out_end;
 	}
 	if (err) {
 		req->out.h.error = -EIO;
-		request_end(fc, req);
-		return err;
+		goto out_end;
 	}
-	if (!req->isreply)
-		request_end(fc, req);
-	else {
-		req->state = FUSE_REQ_SENT;
-		list_move_tail(&req->list, &fc->processing);
-		if (req->interrupted)
-			queue_interrupt(fc, req);
-		spin_unlock(&fc->lock);
+	if (!test_bit(FR_ISREPLY, &req->flags)) {
+		err = reqsize;
+		goto out_end;
 	}
+	list_move_tail(&req->list, &fpq->processing);
+	spin_unlock(&fpq->lock);
+	set_bit(FR_SENT, &req->flags);
+	/* matches barrier in request_wait_answer() */
+	smp_mb__after_atomic();
+	if (test_bit(FR_INTERRUPTED, &req->flags))
+		queue_interrupt(fiq, req);
+
 	return reqsize;
 
+out_end:
+	if (!test_bit(FR_PRIVATE, &req->flags))
+		list_del_init(&req->list);
+	spin_unlock(&fpq->lock);
+	request_end(fc, req);
+	return err;
+
  err_unlock:
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 	return err;
 }
 
@@ -1359,16 +1349,17 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct fuse_copy_state cs;
 	struct file *file = iocb->ki_filp;
-	struct fuse_conn *fc = fuse_get_conn(file);
-	if (!fc)
+	struct fuse_dev *fud = fuse_get_dev(file);
+
+	if (!fud)
 		return -EPERM;
 
 	if (!iter_is_iovec(to))
 		return -EINVAL;
 
-	fuse_copy_init(&cs, fc, 1, to);
+	fuse_copy_init(&cs, 1, to);
 
-	return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
+	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
 }
 
 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
@@ -1380,18 +1371,19 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
 	int do_wakeup = 0;
 	struct pipe_buffer *bufs;
 	struct fuse_copy_state cs;
-	struct fuse_conn *fc = fuse_get_conn(in);
-	if (!fc)
+	struct fuse_dev *fud = fuse_get_dev(in);
+
+	if (!fud)
 		return -EPERM;
 
 	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
 	if (!bufs)
 		return -ENOMEM;
 
-	fuse_copy_init(&cs, fc, 1, NULL);
+	fuse_copy_init(&cs, 1, NULL);
 	cs.pipebufs = bufs;
 	cs.pipe = pipe;
-	ret = fuse_dev_do_read(fc, in, &cs, len);
+	ret = fuse_dev_do_read(fud, in, &cs, len);
 	if (ret < 0)
 		goto out;
 
@@ -1830,11 +1822,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
 }
 
 /* Look up request on processing list by unique ID */
-static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
+static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
 {
 	struct fuse_req *req;
 
-	list_for_each_entry(req, &fc->processing, list) {
+	list_for_each_entry(req, &fpq->processing, list) {
 		if (req->in.h.unique == unique || req->intr_unique == unique)
 			return req;
 	}
@@ -1871,10 +1863,12 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
  * it from the list and copy the rest of the buffer to the request.
  * The request is finished by calling request_end()
  */
-static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
+static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
 				 struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
+	struct fuse_conn *fc = fud->fc;
+	struct fuse_pqueue *fpq = &fud->pq;
 	struct fuse_req *req;
 	struct fuse_out_header oh;
 
@@ -1902,63 +1896,60 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 	if (oh.error <= -1000 || oh.error > 0)
 		goto err_finish;
 
-	spin_lock(&fc->lock);
+	spin_lock(&fpq->lock);
 	err = -ENOENT;
-	if (!fc->connected)
-		goto err_unlock;
+	if (!fpq->connected)
+		goto err_unlock_pq;
 
-	req = request_find(fc, oh.unique);
+	req = request_find(fpq, oh.unique);
 	if (!req)
-		goto err_unlock;
+		goto err_unlock_pq;
 
-	if (req->aborted) {
-		spin_unlock(&fc->lock);
-		fuse_copy_finish(cs);
-		spin_lock(&fc->lock);
-		request_end(fc, req);
-		return -ENOENT;
-	}
 	/* Is it an interrupt reply? */
 	if (req->intr_unique == oh.unique) {
+		spin_unlock(&fpq->lock);
+
 		err = -EINVAL;
 		if (nbytes != sizeof(struct fuse_out_header))
-			goto err_unlock;
+			goto err_finish;
 
 		if (oh.error == -ENOSYS)
 			fc->no_interrupt = 1;
 		else if (oh.error == -EAGAIN)
-			queue_interrupt(fc, req);
+			queue_interrupt(&fc->iq, req);
 
-		spin_unlock(&fc->lock);
 		fuse_copy_finish(cs);
 		return nbytes;
 	}
 
-	req->state = FUSE_REQ_WRITING;
-	list_move(&req->list, &fc->io);
+	clear_bit(FR_SENT, &req->flags);
+	list_move(&req->list, &fpq->io);
 	req->out.h = oh;
-	req->locked = 1;
+	set_bit(FR_LOCKED, &req->flags);
+	spin_unlock(&fpq->lock);
 	cs->req = req;
 	if (!req->out.page_replace)
 		cs->move_pages = 0;
-	spin_unlock(&fc->lock);
 
 	err = copy_out_args(cs, &req->out, nbytes);
 	fuse_copy_finish(cs);
 
-	spin_lock(&fc->lock);
-	req->locked = 0;
-	if (!err) {
-		if (req->aborted)
-			err = -ENOENT;
-	} else if (!req->aborted)
+	spin_lock(&fpq->lock);
+	clear_bit(FR_LOCKED, &req->flags);
+	if (!fpq->connected)
+		err = -ENOENT;
+	else if (err)
 		req->out.h.error = -EIO;
+	if (!test_bit(FR_PRIVATE, &req->flags))
+		list_del_init(&req->list);
+	spin_unlock(&fpq->lock);
+
 	request_end(fc, req);
 
 	return err ? err : nbytes;
 
- err_unlock:
-	spin_unlock(&fc->lock);
+ err_unlock_pq:
+	spin_unlock(&fpq->lock);
  err_finish:
 	fuse_copy_finish(cs);
 	return err;
@@ -1967,16 +1958,17 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct fuse_copy_state cs;
-	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
-	if (!fc)
+	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
+
+	if (!fud)
 		return -EPERM;
 
 	if (!iter_is_iovec(from))
 		return -EINVAL;
 
-	fuse_copy_init(&cs, fc, 0, from);
+	fuse_copy_init(&cs, 0, from);
 
-	return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
+	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
 }
 
 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
@@ -1987,12 +1979,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	unsigned idx;
 	struct pipe_buffer *bufs;
 	struct fuse_copy_state cs;
-	struct fuse_conn *fc;
+	struct fuse_dev *fud;
 	size_t rem;
 	ssize_t ret;
 
-	fc = fuse_get_conn(out);
-	if (!fc)
+	fud = fuse_get_dev(out);
+	if (!fud)
 		return -EPERM;
 
 	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
@@ -2039,7 +2031,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	}
 	pipe_unlock(pipe);
 
-	fuse_copy_init(&cs, fc, 0, NULL);
+	fuse_copy_init(&cs, 0, NULL);
 	cs.pipebufs = bufs;
 	cs.nr_segs = nbuf;
 	cs.pipe = pipe;
@@ -2047,7 +2039,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
 	if (flags & SPLICE_F_MOVE)
 		cs.move_pages = 1;
 
-	ret = fuse_dev_do_write(fc, &cs, len);
+	ret = fuse_dev_do_write(fud, &cs, len);
 
 	for (idx = 0; idx < nbuf; idx++) {
 		struct pipe_buffer *buf = &bufs[idx];
@@ -2061,18 +2053,21 @@ out:
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
 	unsigned mask = POLLOUT | POLLWRNORM;
-	struct fuse_conn *fc = fuse_get_conn(file);
-	if (!fc)
+	struct fuse_iqueue *fiq;
+	struct fuse_dev *fud = fuse_get_dev(file);
+
+	if (!fud)
 		return POLLERR;
 
-	poll_wait(file, &fc->waitq, wait);
+	fiq = &fud->fc->iq;
+	poll_wait(file, &fiq->waitq, wait);
 
-	spin_lock(&fc->lock);
-	if (!fc->connected)
+	spin_lock(&fiq->waitq.lock);
+	if (!fiq->connected)
 		mask = POLLERR;
-	else if (request_pending(fc))
+	else if (request_pending(fiq))
 		mask |= POLLIN | POLLRDNORM;
-	spin_unlock(&fc->lock);
+	spin_unlock(&fiq->waitq.lock);
 
 	return mask;
 }
@@ -2083,67 +2078,18 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
  * This function releases and reacquires fc->lock
  */
 static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(fc->lock)
-__acquires(fc->lock)
 {
 	while (!list_empty(head)) {
 		struct fuse_req *req;
 		req = list_entry(head->next, struct fuse_req, list);
 		req->out.h.error = -ECONNABORTED;
-		request_end(fc, req);
-		spin_lock(&fc->lock);
-	}
-}
-
-/*
- * Abort requests under I/O
- *
- * The requests are set to aborted and finished, and the request
- * waiter is woken up. This will make request_wait_answer() wait
- * until the request is unlocked and then return.
- *
- * If the request is asynchronous, then the end function needs to be
- * called after waiting for the request to be unlocked (if it was
- * locked).
- */
-static void end_io_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-	while (!list_empty(&fc->io)) {
-		struct fuse_req *req =
-			list_entry(fc->io.next, struct fuse_req, list);
-		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-
-		req->aborted = 1;
-		req->out.h.error = -ECONNABORTED;
-		req->state = FUSE_REQ_FINISHED;
+		clear_bit(FR_PENDING, &req->flags);
+		clear_bit(FR_SENT, &req->flags);
 		list_del_init(&req->list);
-		wake_up(&req->waitq);
-		if (end) {
-			req->end = NULL;
-			__fuse_get_request(req);
-			spin_unlock(&fc->lock);
-			wait_event(req->waitq, !req->locked);
-			end(fc, req);
-			fuse_put_request(fc, req);
-			spin_lock(&fc->lock);
-		}
+		request_end(fc, req);
 	}
 }
 
-static void end_queued_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
-	fc->max_background = UINT_MAX;
-	flush_bg_queue(fc);
-	end_requests(fc, &fc->pending);
-	end_requests(fc, &fc->processing);
-	while (forget_pending(fc))
-		kfree(dequeue_forget(fc, 1, NULL));
-}
-
 static void end_polls(struct fuse_conn *fc)
 {
 	struct rb_node *p;
@@ -2162,67 +2108,156 @@ static void end_polls(struct fuse_conn *fc)
2162/* 2108/*
2163 * Abort all requests. 2109 * Abort all requests.
2164 * 2110 *
2165 * Emergency exit in case of a malicious or accidental deadlock, or 2111 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2166 * just a hung filesystem. 2112 * filesystem.
2167 * 2113 *
2168 * The same effect is usually achievable through killing the 2114 * The same effect is usually achievable through killing the filesystem daemon
2169 * filesystem daemon and all users of the filesystem. The exception 2115 * and all users of the filesystem. The exception is the combination of an
2170 * is the combination of an asynchronous request and the tricky 2116 * asynchronous request and the tricky deadlock (see
2171 * deadlock (see Documentation/filesystems/fuse.txt). 2117 * Documentation/filesystems/fuse.txt).
2172 * 2118 *
2173 * During the aborting, progression of requests from the pending and 2119 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2174 * processing lists onto the io list, and progression of new requests 2120 * requests, they should be finished off immediately. Locked requests will be
2175 * onto the pending list is prevented by req->connected being false. 2121 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2176 * 2122 * requests. It is possible that some request will finish before we can. This
2177 * Progression of requests under I/O to the processing list is 2123 * is OK, the request will in that case be removed from the list before we touch
2178 * prevented by the req->aborted flag being true for these requests. 2124 * it.
2179 * For this reason requests on the io list must be aborted first.
2180 */ 2125 */
2181void fuse_abort_conn(struct fuse_conn *fc) 2126void fuse_abort_conn(struct fuse_conn *fc)
2182{ 2127{
2128 struct fuse_iqueue *fiq = &fc->iq;
2129
2183 spin_lock(&fc->lock); 2130 spin_lock(&fc->lock);
2184 if (fc->connected) { 2131 if (fc->connected) {
2132 struct fuse_dev *fud;
2133 struct fuse_req *req, *next;
2134 LIST_HEAD(to_end1);
2135 LIST_HEAD(to_end2);
2136
2185 fc->connected = 0; 2137 fc->connected = 0;
2186 fc->blocked = 0; 2138 fc->blocked = 0;
2187 fuse_set_initialized(fc); 2139 fuse_set_initialized(fc);
2188 end_io_requests(fc); 2140 list_for_each_entry(fud, &fc->devices, entry) {
2189 end_queued_requests(fc); 2141 struct fuse_pqueue *fpq = &fud->pq;
2142
2143 spin_lock(&fpq->lock);
2144 fpq->connected = 0;
2145 list_for_each_entry_safe(req, next, &fpq->io, list) {
2146 req->out.h.error = -ECONNABORTED;
2147 spin_lock(&req->waitq.lock);
2148 set_bit(FR_ABORTED, &req->flags);
2149 if (!test_bit(FR_LOCKED, &req->flags)) {
2150 set_bit(FR_PRIVATE, &req->flags);
2151 list_move(&req->list, &to_end1);
2152 }
2153 spin_unlock(&req->waitq.lock);
2154 }
2155 list_splice_init(&fpq->processing, &to_end2);
2156 spin_unlock(&fpq->lock);
2157 }
2158 fc->max_background = UINT_MAX;
2159 flush_bg_queue(fc);
2160
2161 spin_lock(&fiq->waitq.lock);
2162 fiq->connected = 0;
2163 list_splice_init(&fiq->pending, &to_end2);
2164 while (forget_pending(fiq))
2165 kfree(dequeue_forget(fiq, 1, NULL));
2166 wake_up_all_locked(&fiq->waitq);
2167 spin_unlock(&fiq->waitq.lock);
2168 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2190 end_polls(fc); 2169 end_polls(fc);
2191 wake_up_all(&fc->waitq);
2192 wake_up_all(&fc->blocked_waitq); 2170 wake_up_all(&fc->blocked_waitq);
2193 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 2171 spin_unlock(&fc->lock);
2172
2173 while (!list_empty(&to_end1)) {
2174 req = list_first_entry(&to_end1, struct fuse_req, list);
2175 __fuse_get_request(req);
2176 list_del_init(&req->list);
2177 request_end(fc, req);
2178 }
2179 end_requests(fc, &to_end2);
2180 } else {
2181 spin_unlock(&fc->lock);
2194 } 2182 }
2195 spin_unlock(&fc->lock);
2196} 2183}
2197EXPORT_SYMBOL_GPL(fuse_abort_conn); 2184EXPORT_SYMBOL_GPL(fuse_abort_conn);
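For orientation, the usual way this path is reached from userspace is the fusectl control filesystem: each connection gets an abort file, and writing to it ends up in fuse_abort_conn(). A minimal sketch, assuming fusectl is mounted at its conventional /sys/fs/fuse/connections location and that <id> is the connection's device number; the helper name is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Abort fuse connection <id> via fusectl; any write to the abort
 * file triggers fuse_abort_conn() on the kernel side. */
static int fuse_abort(unsigned int id)
{
	char path[64];
	int fd, ret;

	snprintf(path, sizeof(path), "/sys/fs/fuse/connections/%u/abort", id);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = (write(fd, "1", 1) == 1) ? 0 : -1;
	close(fd);
	return ret;
}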
2198 2185
2199int fuse_dev_release(struct inode *inode, struct file *file) 2186int fuse_dev_release(struct inode *inode, struct file *file)
2200{ 2187{
2201 struct fuse_conn *fc = fuse_get_conn(file); 2188 struct fuse_dev *fud = fuse_get_dev(file);
2202 if (fc) {
2203 spin_lock(&fc->lock);
2204 fc->connected = 0;
2205 fc->blocked = 0;
2206 fuse_set_initialized(fc);
2207 end_queued_requests(fc);
2208 end_polls(fc);
2209 wake_up_all(&fc->blocked_waitq);
2210 spin_unlock(&fc->lock);
2211 fuse_conn_put(fc);
2212 }
2213 2189
2190 if (fud) {
2191 struct fuse_conn *fc = fud->fc;
2192 struct fuse_pqueue *fpq = &fud->pq;
2193
2194 WARN_ON(!list_empty(&fpq->io));
2195 end_requests(fc, &fpq->processing);
2196 /* Are we the last open device? */
2197 if (atomic_dec_and_test(&fc->dev_count)) {
2198 WARN_ON(fc->iq.fasync != NULL);
2199 fuse_abort_conn(fc);
2200 }
2201 fuse_dev_free(fud);
2202 }
2214 return 0; 2203 return 0;
2215} 2204}
2216EXPORT_SYMBOL_GPL(fuse_dev_release); 2205EXPORT_SYMBOL_GPL(fuse_dev_release);
2217 2206
2218static int fuse_dev_fasync(int fd, struct file *file, int on) 2207static int fuse_dev_fasync(int fd, struct file *file, int on)
2219{ 2208{
2220 struct fuse_conn *fc = fuse_get_conn(file); 2209 struct fuse_dev *fud = fuse_get_dev(file);
2221 if (!fc) 2210
2211 if (!fud)
2222 return -EPERM; 2212 return -EPERM;
2223 2213
2224 /* No locking - fasync_helper does its own locking */ 2214 /* No locking - fasync_helper does its own locking */
2225 return fasync_helper(fd, file, on, &fc->fasync); 2215 return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
2216}
2217
2218static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
2219{
2220 struct fuse_dev *fud;
2221
2222 if (new->private_data)
2223 return -EINVAL;
2224
2225 fud = fuse_dev_alloc(fc);
2226 if (!fud)
2227 return -ENOMEM;
2228
2229 new->private_data = fud;
2230 atomic_inc(&fc->dev_count);
2231
2232 return 0;
2233}
2234
2235static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2236 unsigned long arg)
2237{
2238 int err = -ENOTTY;
2239
2240 if (cmd == FUSE_DEV_IOC_CLONE) {
2241 int oldfd;
2242
2243 err = -EFAULT;
2244 if (!get_user(oldfd, (__u32 __user *) arg)) {
2245 struct file *old = fget(oldfd);
2246
2247 err = -EINVAL;
2248 if (old) {
2249 struct fuse_dev *fud = fuse_get_dev(old);
2250
2251 if (fud) {
2252 mutex_lock(&fuse_mutex);
2253 err = fuse_device_clone(fud->fc, file);
2254 mutex_unlock(&fuse_mutex);
2255 }
2256 fput(old);
2257 }
2258 }
2259 }
2260 return err;
2226} 2261}
2227 2262
2228const struct file_operations fuse_dev_operations = { 2263const struct file_operations fuse_dev_operations = {
@@ -2236,6 +2271,8 @@ const struct file_operations fuse_dev_operations = {
2236 .poll = fuse_dev_poll, 2271 .poll = fuse_dev_poll,
2237 .release = fuse_dev_release, 2272 .release = fuse_dev_release,
2238 .fasync = fuse_dev_fasync, 2273 .fasync = fuse_dev_fasync,
2274 .unlocked_ioctl = fuse_dev_ioctl,
2275 .compat_ioctl = fuse_dev_ioctl,
2239}; 2276};
2240EXPORT_SYMBOL_GPL(fuse_dev_operations); 2277EXPORT_SYMBOL_GPL(fuse_dev_operations);
2241 2278
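fuse_device_clone() above is what the new FUSE_DEV_IOC_CLONE ioctl drives: the daemon opens /dev/fuse a second time and binds the fresh fd to an existing session, getting a private fuse_dev, and thus a private processing queue, per worker. A sketch of the userspace side, assuming FUSE_DEV_IOC_CLONE is exported through <linux/fuse.h> as this series adds it:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>		/* FUSE_DEV_IOC_CLONE */

/* Clone an established /dev/fuse session fd.  Returns a new fd bound
 * to the same fuse_conn, or -1 on error. */
static int fuse_clone_session(int session_fd)
{
	uint32_t oldfd = session_fd;
	int clonefd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clonefd < 0)
		return -1;
	/* fuse_dev_ioctl() reads the old fd and calls fuse_device_clone() */
	if (ioctl(clonefd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
		close(clonefd);
		return -1;
	}
	return clonefd;
}

A reply must then be written back on the fd the request was read from: the request sits on that device's private fpq->processing list until it is answered, which is also why fuse_dev_release() above ends exactly that list.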
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 8c5e2fa68835..014fa8ba2b51 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -96,17 +96,17 @@ static void fuse_file_put(struct fuse_file *ff, bool sync)
96 * Drop the release request when client does not 96 * Drop the release request when client does not
97 * implement 'open' 97 * implement 'open'
98 */ 98 */
99 req->background = 0; 99 __clear_bit(FR_BACKGROUND, &req->flags);
100 iput(req->misc.release.inode); 100 iput(req->misc.release.inode);
101 fuse_put_request(ff->fc, req); 101 fuse_put_request(ff->fc, req);
102 } else if (sync) { 102 } else if (sync) {
103 req->background = 0; 103 __clear_bit(FR_BACKGROUND, &req->flags);
104 fuse_request_send(ff->fc, req); 104 fuse_request_send(ff->fc, req);
105 iput(req->misc.release.inode); 105 iput(req->misc.release.inode);
106 fuse_put_request(ff->fc, req); 106 fuse_put_request(ff->fc, req);
107 } else { 107 } else {
108 req->end = fuse_release_end; 108 req->end = fuse_release_end;
109 req->background = 1; 109 __set_bit(FR_BACKGROUND, &req->flags);
110 fuse_request_send_background(ff->fc, req); 110 fuse_request_send_background(ff->fc, req);
111 } 111 }
112 kfree(ff); 112 kfree(ff);
@@ -299,8 +299,8 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
299{ 299{
300 WARN_ON(atomic_read(&ff->count) > 1); 300 WARN_ON(atomic_read(&ff->count) > 1);
301 fuse_prepare_release(ff, flags, FUSE_RELEASE); 301 fuse_prepare_release(ff, flags, FUSE_RELEASE);
302 ff->reserved_req->force = 1; 302 __set_bit(FR_FORCE, &ff->reserved_req->flags);
303 ff->reserved_req->background = 0; 303 __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
304 fuse_request_send(ff->fc, ff->reserved_req); 304 fuse_request_send(ff->fc, ff->reserved_req);
305 fuse_put_request(ff->fc, ff->reserved_req); 305 fuse_put_request(ff->fc, ff->reserved_req);
306 kfree(ff); 306 kfree(ff);
@@ -426,7 +426,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
426 req->in.numargs = 1; 426 req->in.numargs = 1;
427 req->in.args[0].size = sizeof(inarg); 427 req->in.args[0].size = sizeof(inarg);
428 req->in.args[0].value = &inarg; 428 req->in.args[0].value = &inarg;
429 req->force = 1; 429 __set_bit(FR_FORCE, &req->flags);
430 fuse_request_send(fc, req); 430 fuse_request_send(fc, req);
431 err = req->out.h.error; 431 err = req->out.h.error;
432 fuse_put_request(fc, req); 432 fuse_put_request(fc, req);
@@ -1611,7 +1611,8 @@ static int fuse_writepage_locked(struct page *page)
1611 if (!req) 1611 if (!req)
1612 goto err; 1612 goto err;
1613 1613
1614 req->background = 1; /* writeback always goes to bg_queue */ 1614 /* writeback always goes to bg_queue */
1615 __set_bit(FR_BACKGROUND, &req->flags);
1615 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); 1616 tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1616 if (!tmp_page) 1617 if (!tmp_page)
1617 goto err_free; 1618 goto err_free;
@@ -1742,8 +1743,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
1742 } 1743 }
1743 } 1744 }
1744 1745
1745 if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT || 1746 if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
1746 old_req->state == FUSE_REQ_PENDING)) {
1747 struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host); 1747 struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
1748 1748
1749 copy_highpage(old_req->pages[0], page); 1749 copy_highpage(old_req->pages[0], page);
@@ -1830,7 +1830,7 @@ static int fuse_writepages_fill(struct page *page,
1830 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; 1830 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
1831 req->misc.write.next = NULL; 1831 req->misc.write.next = NULL;
1832 req->in.argpages = 1; 1832 req->in.argpages = 1;
1833 req->background = 1; 1833 __set_bit(FR_BACKGROUND, &req->flags);
1834 req->num_pages = 0; 1834 req->num_pages = 0;
1835 req->end = fuse_writepage_end; 1835 req->end = fuse_writepage_end;
1836 req->inode = inode; 1836 req->inode = inode;
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 7354dc142a50..405113101db8 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -241,16 +241,6 @@ struct fuse_args {
241 241
242#define FUSE_ARGS(args) struct fuse_args args = {} 242#define FUSE_ARGS(args) struct fuse_args args = {}
243 243
244/** The request state */
245enum fuse_req_state {
246 FUSE_REQ_INIT = 0,
247 FUSE_REQ_PENDING,
248 FUSE_REQ_READING,
249 FUSE_REQ_SENT,
250 FUSE_REQ_WRITING,
251 FUSE_REQ_FINISHED
252};
253
254/** The request IO state (for asynchronous processing) */ 244/** The request IO state (for asynchronous processing) */
255struct fuse_io_priv { 245struct fuse_io_priv {
256 int async; 246 int async;
@@ -267,7 +257,40 @@ struct fuse_io_priv {
267}; 257};
268 258
269/** 259/**
260 * Request flags
261 *
262 * FR_ISREPLY: set if the request has a reply
263 * FR_FORCE: force sending of the request even if interrupted
264 * FR_BACKGROUND: request is sent in the background
265 * FR_WAITING: request is counted as "waiting"
266 * FR_ABORTED: the request was aborted
267 * FR_INTERRUPTED: the request has been interrupted
268 * FR_LOCKED: data is being copied to/from the request
269 * FR_PENDING: request is not yet in userspace
270 * FR_SENT: request is in userspace, waiting for an answer
271 * FR_FINISHED: request is finished
272 * FR_PRIVATE: request is on private list
273 */
274enum fuse_req_flag {
275 FR_ISREPLY,
276 FR_FORCE,
277 FR_BACKGROUND,
278 FR_WAITING,
279 FR_ABORTED,
280 FR_INTERRUPTED,
281 FR_LOCKED,
282 FR_PENDING,
283 FR_SENT,
284 FR_FINISHED,
285 FR_PRIVATE,
286};
287
288/**
270 * A request to the client 289 * A request to the client
290 *
291 * .waitq.lock protects the following fields:
292 * - FR_ABORTED
293 * - FR_LOCKED (may also be modified under fc->lock, tested under both)
271 */ 294 */
272struct fuse_req { 295struct fuse_req {
273 /** This can be on either pending processing or io lists in 296 /** This can be on either pending processing or io lists in
@@ -283,35 +306,8 @@ struct fuse_req {
283 /** Unique ID for the interrupt request */ 306 /** Unique ID for the interrupt request */
284 u64 intr_unique; 307 u64 intr_unique;
285 308
286 /* 309 /* Request flags, updated with test/set/clear_bit() */
287 * The following bitfields are either set once before the 310 unsigned long flags;
288 * request is queued or setting/clearing them is protected by
289 * fuse_conn->lock
290 */
291
292 /** True if the request has reply */
293 unsigned isreply:1;
294
295 /** Force sending of the request even if interrupted */
296 unsigned force:1;
297
298 /** The request was aborted */
299 unsigned aborted:1;
300
301 /** Request is sent in the background */
302 unsigned background:1;
303
304 /** The request has been interrupted */
305 unsigned interrupted:1;
306
307 /** Data is being copied to/from the request */
308 unsigned locked:1;
309
310 /** Request is counted as "waiting" */
311 unsigned waiting:1;
312
313 /** State of the request */
314 enum fuse_req_state state;
315 311
316 /** The request input */ 312 /** The request input */
317 struct fuse_in in; 313 struct fuse_in in;
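This bitfield-to-flags conversion is what allows later patches to drop fc->lock around request state: updating a ':1' bitfield is a non-atomic read-modify-write of the containing word, so two CPUs setting different flags could lose one update, whereas set_bit()/test_bit()/clear_bit() on an unsigned long operate atomically on single bits. A sketch of the resulting idiom, loosely modelled on what lock_request() becomes in this series (illustration only, kernel context assumed):

/* FR_ABORTED and FR_LOCKED are serialized by req->waitq.lock, per the
 * comment on struct fuse_req above; the bitops themselves need no
 * additional lock. */
static bool lock_request_sketch(struct fuse_req *req)
{
	bool ok = true;

	spin_lock(&req->waitq.lock);
	if (test_bit(FR_ABORTED, &req->flags))
		ok = false;			/* the aborter will finish it */
	else
		set_bit(FR_LOCKED, &req->flags);	/* atomic per-bit RMW */
	spin_unlock(&req->waitq.lock);
	return ok;
}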
@@ -380,6 +376,61 @@ struct fuse_req {
380 struct file *stolen_file; 376 struct file *stolen_file;
381}; 377};
382 378
379struct fuse_iqueue {
380 /** Connection established */
381 unsigned connected;
382
383 /** Readers of the connection are waiting on this */
384 wait_queue_head_t waitq;
385
386 /** The next unique request id */
387 u64 reqctr;
388
389 /** The list of pending requests */
390 struct list_head pending;
391
392 /** Pending interrupts */
393 struct list_head interrupts;
394
395 /** Queue of pending forgets */
396 struct fuse_forget_link forget_list_head;
397 struct fuse_forget_link *forget_list_tail;
398
399 /** Batching of FORGET requests (positive indicates FORGET batch) */
400 int forget_batch;
401
402 /** O_ASYNC requests */
403 struct fasync_struct *fasync;
404};
405
406struct fuse_pqueue {
407 /** Connection established */
408 unsigned connected;
409
410 /** Lock protecting accesses to members of this structure */
411 spinlock_t lock;
412
413 /** The list of requests being processed */
414 struct list_head processing;
415
416 /** The list of requests under I/O */
417 struct list_head io;
418};
419
420/**
421 * Fuse device instance
422 */
423struct fuse_dev {
424 /** Fuse connection for this device */
425 struct fuse_conn *fc;
426
427 /** Processing queue */
428 struct fuse_pqueue pq;
429
430 /** list entry on fc->devices */
431 struct list_head entry;
432};
433
383/** 434/**
384 * A Fuse connection. 435 * A Fuse connection.
385 * 436 *
@@ -394,6 +445,9 @@ struct fuse_conn {
394 /** Refcount */ 445 /** Refcount */
395 atomic_t count; 446 atomic_t count;
396 447
448 /** Number of fuse_dev's */
449 atomic_t dev_count;
450
397 struct rcu_head rcu; 451 struct rcu_head rcu;
398 452
399 /** The user id for this mount */ 453 /** The user id for this mount */
@@ -411,17 +465,8 @@ struct fuse_conn {
411 /** Maximum write size */ 465 /** Maximum write size */
412 unsigned max_write; 466 unsigned max_write;
413 467
414 /** Readers of the connection are waiting on this */ 468 /** Input queue */
415 wait_queue_head_t waitq; 469 struct fuse_iqueue iq;
416
417 /** The list of pending requests */
418 struct list_head pending;
419
420 /** The list of requests being processed */
421 struct list_head processing;
422
423 /** The list of requests under I/O */
424 struct list_head io;
425 470
426 /** The next unique kernel file handle */ 471 /** The next unique kernel file handle */
427 u64 khctr; 472 u64 khctr;
@@ -444,16 +489,6 @@ struct fuse_conn {
444 /** The list of background requests set aside for later queuing */ 489 /** The list of background requests set aside for later queuing */
445 struct list_head bg_queue; 490 struct list_head bg_queue;
446 491
447 /** Pending interrupts */
448 struct list_head interrupts;
449
450 /** Queue of pending forgets */
451 struct fuse_forget_link forget_list_head;
452 struct fuse_forget_link *forget_list_tail;
453
454 /** Batching of FORGET requests (positive indicates FORGET batch) */
455 int forget_batch;
456
457 /** Flag indicating that INIT reply has been received. Allocating 492 /** Flag indicating that INIT reply has been received. Allocating
458 * any fuse request will be suspended until the flag is set */ 493 * any fuse request will be suspended until the flag is set */
459 int initialized; 494 int initialized;
@@ -469,9 +504,6 @@ struct fuse_conn {
469 /** waitq for reserved requests */ 504 /** waitq for reserved requests */
470 wait_queue_head_t reserved_req_waitq; 505 wait_queue_head_t reserved_req_waitq;
471 506
472 /** The next unique request id */
473 u64 reqctr;
474
475 /** Connection established, cleared on umount, connection 507 /** Connection established, cleared on umount, connection
476 abort and device release */ 508 abort and device release */
477 unsigned connected; 509 unsigned connected;
@@ -594,9 +626,6 @@ struct fuse_conn {
594 /** number of dentries used in the above array */ 626 /** number of dentries used in the above array */
595 int ctl_ndents; 627 int ctl_ndents;
596 628
597 /** O_ASYNC requests */
598 struct fasync_struct *fasync;
599
600 /** Key for lock owner ID scrambling */ 629 /** Key for lock owner ID scrambling */
601 u32 scramble_key[4]; 630 u32 scramble_key[4];
602 631
@@ -614,6 +643,9 @@ struct fuse_conn {
614 643
615 /** Read/write semaphore to hold when accessing sb. */ 644 /** Read/write semaphore to hold when accessing sb. */
616 struct rw_semaphore killsb; 645 struct rw_semaphore killsb;
646
647 /** List of device instances belonging to this connection */
648 struct list_head devices;
617}; 649};
618 650
619static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) 651static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -826,6 +858,9 @@ void fuse_conn_init(struct fuse_conn *fc);
826 */ 858 */
827void fuse_conn_put(struct fuse_conn *fc); 859void fuse_conn_put(struct fuse_conn *fc);
828 860
861struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc);
862void fuse_dev_free(struct fuse_dev *fud);
863
829/** 864/**
830 * Add connection to control filesystem 865 * Add connection to control filesystem
831 */ 866 */
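In sum: one shared fuse_iqueue per connection feeds requests in, one fuse_pqueue per fuse_dev tracks them while the daemon processes them, and fc->lock is left guarding connection-wide state such as the fc->devices list. The nesting used by fuse_abort_conn() is fc->lock, then fpq->lock (or fiq->waitq.lock). A sketch of the resulting walk-every-queue pattern; the helper is hypothetical, not part of the patch:

/* Visit each device's processing queue under the nesting that
 * fuse_abort_conn() establishes: fc->lock protects fc->devices,
 * and each fpq->lock nests inside it. */
static void for_each_pqueue(struct fuse_conn *fc,
			    void (*fn)(struct fuse_pqueue *fpq, void *data),
			    void *data)
{
	struct fuse_dev *fud;

	spin_lock(&fc->lock);
	list_for_each_entry(fud, &fc->devices, entry) {
		struct fuse_pqueue *fpq = &fud->pq;

		spin_lock(&fpq->lock);
		fn(fpq, data);
		spin_unlock(&fpq->lock);
	}
	spin_unlock(&fc->lock);
}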
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 082ac1c97f39..ac81f48ab2f4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -362,8 +362,8 @@ static void fuse_send_destroy(struct fuse_conn *fc)
362 if (req && fc->conn_init) { 362 if (req && fc->conn_init) {
363 fc->destroy_req = NULL; 363 fc->destroy_req = NULL;
364 req->in.h.opcode = FUSE_DESTROY; 364 req->in.h.opcode = FUSE_DESTROY;
365 req->force = 1; 365 __set_bit(FR_FORCE, &req->flags);
366 req->background = 0; 366 __clear_bit(FR_BACKGROUND, &req->flags);
367 fuse_request_send(fc, req); 367 fuse_request_send(fc, req);
368 fuse_put_request(fc, req); 368 fuse_put_request(fc, req);
369 } 369 }
@@ -567,30 +567,46 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
567 return 0; 567 return 0;
568} 568}
569 569
570static void fuse_iqueue_init(struct fuse_iqueue *fiq)
571{
572 memset(fiq, 0, sizeof(struct fuse_iqueue));
573 init_waitqueue_head(&fiq->waitq);
574 INIT_LIST_HEAD(&fiq->pending);
575 INIT_LIST_HEAD(&fiq->interrupts);
576 fiq->forget_list_tail = &fiq->forget_list_head;
577 fiq->connected = 1;
578}
579
580static void fuse_pqueue_init(struct fuse_pqueue *fpq)
581{
582 memset(fpq, 0, sizeof(struct fuse_pqueue));
583 spin_lock_init(&fpq->lock);
584 INIT_LIST_HEAD(&fpq->processing);
585 INIT_LIST_HEAD(&fpq->io);
586 fpq->connected = 1;
587}
588
570void fuse_conn_init(struct fuse_conn *fc) 589void fuse_conn_init(struct fuse_conn *fc)
571{ 590{
572 memset(fc, 0, sizeof(*fc)); 591 memset(fc, 0, sizeof(*fc));
573 spin_lock_init(&fc->lock); 592 spin_lock_init(&fc->lock);
574 init_rwsem(&fc->killsb); 593 init_rwsem(&fc->killsb);
575 atomic_set(&fc->count, 1); 594 atomic_set(&fc->count, 1);
576 init_waitqueue_head(&fc->waitq); 595 atomic_set(&fc->dev_count, 1);
577 init_waitqueue_head(&fc->blocked_waitq); 596 init_waitqueue_head(&fc->blocked_waitq);
578 init_waitqueue_head(&fc->reserved_req_waitq); 597 init_waitqueue_head(&fc->reserved_req_waitq);
579 INIT_LIST_HEAD(&fc->pending); 598 fuse_iqueue_init(&fc->iq);
580 INIT_LIST_HEAD(&fc->processing);
581 INIT_LIST_HEAD(&fc->io);
582 INIT_LIST_HEAD(&fc->interrupts);
583 INIT_LIST_HEAD(&fc->bg_queue); 599 INIT_LIST_HEAD(&fc->bg_queue);
584 INIT_LIST_HEAD(&fc->entry); 600 INIT_LIST_HEAD(&fc->entry);
585 fc->forget_list_tail = &fc->forget_list_head; 601 INIT_LIST_HEAD(&fc->devices);
586 atomic_set(&fc->num_waiting, 0); 602 atomic_set(&fc->num_waiting, 0);
587 fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; 603 fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
588 fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; 604 fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
589 fc->khctr = 0; 605 fc->khctr = 0;
590 fc->polled_files = RB_ROOT; 606 fc->polled_files = RB_ROOT;
591 fc->reqctr = 0;
592 fc->blocked = 0; 607 fc->blocked = 0;
593 fc->initialized = 0; 608 fc->initialized = 0;
609 fc->connected = 1;
594 fc->attr_version = 1; 610 fc->attr_version = 1;
595 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); 611 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
596} 612}
@@ -930,6 +946,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
930 946
931static void fuse_free_conn(struct fuse_conn *fc) 947static void fuse_free_conn(struct fuse_conn *fc)
932{ 948{
949 WARN_ON(!list_empty(&fc->devices));
933 kfree_rcu(fc, rcu); 950 kfree_rcu(fc, rcu);
934} 951}
935 952
@@ -975,8 +992,42 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
975 return 0; 992 return 0;
976} 993}
977 994
995struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc)
996{
997 struct fuse_dev *fud;
998
999 fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL);
1000 if (fud) {
1001 fud->fc = fuse_conn_get(fc);
1002 fuse_pqueue_init(&fud->pq);
1003
1004 spin_lock(&fc->lock);
1005 list_add_tail(&fud->entry, &fc->devices);
1006 spin_unlock(&fc->lock);
1007 }
1008
1009 return fud;
1010}
1011EXPORT_SYMBOL_GPL(fuse_dev_alloc);
1012
1013void fuse_dev_free(struct fuse_dev *fud)
1014{
1015 struct fuse_conn *fc = fud->fc;
1016
1017 if (fc) {
1018 spin_lock(&fc->lock);
1019 list_del(&fud->entry);
1020 spin_unlock(&fc->lock);
1021
1022 fuse_conn_put(fc);
1023 }
1024 kfree(fud);
1025}
1026EXPORT_SYMBOL_GPL(fuse_dev_free);
1027
978static int fuse_fill_super(struct super_block *sb, void *data, int silent) 1028static int fuse_fill_super(struct super_block *sb, void *data, int silent)
979{ 1029{
1030 struct fuse_dev *fud;
980 struct fuse_conn *fc; 1031 struct fuse_conn *fc;
981 struct inode *root; 1032 struct inode *root;
982 struct fuse_mount_data d; 1033 struct fuse_mount_data d;
@@ -1026,12 +1077,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1026 goto err_fput; 1077 goto err_fput;
1027 1078
1028 fuse_conn_init(fc); 1079 fuse_conn_init(fc);
1080 fc->release = fuse_free_conn;
1081
1082 fud = fuse_dev_alloc(fc);
1083 if (!fud)
1084 goto err_put_conn;
1029 1085
1030 fc->dev = sb->s_dev; 1086 fc->dev = sb->s_dev;
1031 fc->sb = sb; 1087 fc->sb = sb;
1032 err = fuse_bdi_init(fc, sb); 1088 err = fuse_bdi_init(fc, sb);
1033 if (err) 1089 if (err)
1034 goto err_put_conn; 1090 goto err_dev_free;
1035 1091
1036 sb->s_bdi = &fc->bdi; 1092 sb->s_bdi = &fc->bdi;
1037 1093
@@ -1040,7 +1096,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1040 fc->dont_mask = 1; 1096 fc->dont_mask = 1;
1041 sb->s_flags |= MS_POSIXACL; 1097 sb->s_flags |= MS_POSIXACL;
1042 1098
1043 fc->release = fuse_free_conn;
1044 fc->flags = d.flags; 1099 fc->flags = d.flags;
1045 fc->user_id = d.user_id; 1100 fc->user_id = d.user_id;
1046 fc->group_id = d.group_id; 1101 fc->group_id = d.group_id;
@@ -1053,14 +1108,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1053 root = fuse_get_root_inode(sb, d.rootmode); 1108 root = fuse_get_root_inode(sb, d.rootmode);
1054 root_dentry = d_make_root(root); 1109 root_dentry = d_make_root(root);
1055 if (!root_dentry) 1110 if (!root_dentry)
1056 goto err_put_conn; 1111 goto err_dev_free;
1057 /* only now - we want root dentry with NULL ->d_op */ 1112 /* only now - we want root dentry with NULL ->d_op */
1058 sb->s_d_op = &fuse_dentry_operations; 1113 sb->s_d_op = &fuse_dentry_operations;
1059 1114
1060 init_req = fuse_request_alloc(0); 1115 init_req = fuse_request_alloc(0);
1061 if (!init_req) 1116 if (!init_req)
1062 goto err_put_root; 1117 goto err_put_root;
1063 init_req->background = 1; 1118 __set_bit(FR_BACKGROUND, &init_req->flags);
1064 1119
1065 if (is_bdev) { 1120 if (is_bdev) {
1066 fc->destroy_req = fuse_request_alloc(0); 1121 fc->destroy_req = fuse_request_alloc(0);
@@ -1079,8 +1134,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1079 1134
1080 list_add_tail(&fc->entry, &fuse_conn_list); 1135 list_add_tail(&fc->entry, &fuse_conn_list);
1081 sb->s_root = root_dentry; 1136 sb->s_root = root_dentry;
1082 fc->connected = 1; 1137 file->private_data = fud;
1083 file->private_data = fuse_conn_get(fc);
1084 mutex_unlock(&fuse_mutex); 1138 mutex_unlock(&fuse_mutex);
1085 /* 1139 /*
1086 * atomic_dec_and_test() in fput() provides the necessary 1140 * atomic_dec_and_test() in fput() provides the necessary
@@ -1099,6 +1153,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1099 fuse_request_free(init_req); 1153 fuse_request_free(init_req);
1100 err_put_root: 1154 err_put_root:
1101 dput(root_dentry); 1155 dput(root_dentry);
1156 err_dev_free:
1157 fuse_dev_free(fud);
1102 err_put_conn: 1158 err_put_conn:
1103 fuse_bdi_destroy(fc); 1159 fuse_bdi_destroy(fc);
1104 fuse_conn_put(fc); 1160 fuse_conn_put(fc);