Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/dev.c     237
-rw-r--r--  fs/fuse/dir.c     315
-rw-r--r--  fs/fuse/file.c    134
-rw-r--r--  fs/fuse/fuse_i.h  106
-rw-r--r--  fs/fuse/inode.c   282
5 files changed, 741 insertions, 333 deletions
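
For orientation before the per-file hunks: the series replaces the req->sent / req->finished bitfields with an explicit request state, adds an fc->io list for requests currently being copied to or from userspace, and gives each request an optional 'end' completion callback (see the dev.c and fuse_i.h hunks below). The following is a minimal userspace sketch of that lifecycle with invented names, not the kernel structures themselves:

    #include <stdio.h>

    /* Hypothetical userspace model of the new request lifecycle; the names
     * mirror the enum fuse_req_state added in fuse_i.h, but nothing here is
     * kernel code.  REQ_READING corresponds to the new fc->io list. */
    enum req_state {
            REQ_INIT,       /* allocated, not yet queued */
            REQ_PENDING,    /* on fc->pending, waiting to be read by the daemon */
            REQ_READING,    /* on fc->io, being copied to userspace */
            REQ_SENT,       /* on fc->processing, waiting for the reply */
            REQ_FINISHED    /* reply arrived, or the request was aborted */
    };

    struct req {
            enum req_state state;
            void (*end)(struct req *);      /* mirrors the new req->end callback */
    };

    /* Roughly what request_end() does after this patch: grab and clear the
     * callback, mark the request finished, then either run the callback
     * (asynchronous requests) or hand the request back to the waiter. */
    static void finish(struct req *r)
    {
            void (*end)(struct req *) = r->end;

            r->end = NULL;
            r->state = REQ_FINISHED;
            if (end)
                    end(r);
            else
                    printf("waking the synchronous waiter\n");
    }

    static void async_done(struct req *r)
    {
            printf("async completion, state=%d\n", r->state);
    }

    int main(void)
    {
            struct req r = { REQ_INIT, async_done };

            r.state = REQ_PENDING;  /* queue_request() */
            r.state = REQ_READING;  /* fuse_dev_readv() moves it to fc->io */
            r.state = REQ_SENT;     /* moved to fc->processing */
            finish(&r);             /* reply written by the daemon */
            return 0;
    }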
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 8f873e621f41..4526da8907c6 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -21,18 +21,18 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR);
21 21
22static kmem_cache_t *fuse_req_cachep; 22static kmem_cache_t *fuse_req_cachep;
23 23
24static inline struct fuse_conn *fuse_get_conn(struct file *file) 24static struct fuse_conn *fuse_get_conn(struct file *file)
25{ 25{
26 struct fuse_conn *fc; 26 struct fuse_conn *fc;
27 spin_lock(&fuse_lock); 27 spin_lock(&fuse_lock);
28 fc = file->private_data; 28 fc = file->private_data;
29 if (fc && !fc->mounted) 29 if (fc && !fc->connected)
30 fc = NULL; 30 fc = NULL;
31 spin_unlock(&fuse_lock); 31 spin_unlock(&fuse_lock);
32 return fc; 32 return fc;
33} 33}
34 34
35static inline void fuse_request_init(struct fuse_req *req) 35static void fuse_request_init(struct fuse_req *req)
36{ 36{
37 memset(req, 0, sizeof(*req)); 37 memset(req, 0, sizeof(*req));
38 INIT_LIST_HEAD(&req->list); 38 INIT_LIST_HEAD(&req->list);
@@ -53,7 +53,7 @@ void fuse_request_free(struct fuse_req *req)
53 kmem_cache_free(fuse_req_cachep, req); 53 kmem_cache_free(fuse_req_cachep, req);
54} 54}
55 55
56static inline void block_sigs(sigset_t *oldset) 56static void block_sigs(sigset_t *oldset)
57{ 57{
58 sigset_t mask; 58 sigset_t mask;
59 59
@@ -61,7 +61,7 @@ static inline void block_sigs(sigset_t *oldset)
61 sigprocmask(SIG_BLOCK, &mask, oldset); 61 sigprocmask(SIG_BLOCK, &mask, oldset);
62} 62}
63 63
64static inline void restore_sigs(sigset_t *oldset) 64static void restore_sigs(sigset_t *oldset)
65{ 65{
66 sigprocmask(SIG_SETMASK, oldset, NULL); 66 sigprocmask(SIG_SETMASK, oldset, NULL);
67} 67}
@@ -109,18 +109,24 @@ struct fuse_req *fuse_get_request(struct fuse_conn *fc)
109 int intr; 109 int intr;
110 sigset_t oldset; 110 sigset_t oldset;
111 111
112 atomic_inc(&fc->num_waiting);
112 block_sigs(&oldset); 113 block_sigs(&oldset);
113 intr = down_interruptible(&fc->outstanding_sem); 114 intr = down_interruptible(&fc->outstanding_sem);
114 restore_sigs(&oldset); 115 restore_sigs(&oldset);
115 return intr ? NULL : do_get_request(fc); 116 if (intr) {
117 atomic_dec(&fc->num_waiting);
118 return NULL;
119 }
120 return do_get_request(fc);
116} 121}
117 122
118static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req) 123static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
119{ 124{
120 spin_lock(&fuse_lock); 125 spin_lock(&fuse_lock);
121 if (req->preallocated) 126 if (req->preallocated) {
127 atomic_dec(&fc->num_waiting);
122 list_add(&req->list, &fc->unused_list); 128 list_add(&req->list, &fc->unused_list);
123 else 129 } else
124 fuse_request_free(req); 130 fuse_request_free(req);
125 131
126 /* If we are in debt decrease that first */ 132 /* If we are in debt decrease that first */
@@ -151,19 +157,20 @@ void fuse_release_background(struct fuse_req *req)
151/* 157/*
152 * This function is called when a request is finished. Either a reply 158 * This function is called when a request is finished. Either a reply
153 * has arrived or it was interrupted (and not yet sent) or some error 159 * has arrived or it was interrupted (and not yet sent) or some error
154 * occurred during communication with userspace, or the device file was 160 * occurred during communication with userspace, or the device file
155 * closed. It decreases the reference count for the request. In case 161 * was closed. In case of a background request the reference to the
156 * of a background request the reference to the stored objects are 162 * stored objects are released. The requester thread is woken up (if
157 * released. The requester thread is woken up (if still waiting), and 163 * still waiting), the 'end' callback is called if given, else the
158 * finally the request is either freed or put on the unused_list 164 * reference to the request is released
159 * 165 *
160 * Called with fuse_lock, unlocks it 166 * Called with fuse_lock, unlocks it
161 */ 167 */
162static void request_end(struct fuse_conn *fc, struct fuse_req *req) 168static void request_end(struct fuse_conn *fc, struct fuse_req *req)
163{ 169{
164 int putback; 170 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
165 req->finished = 1; 171 req->end = NULL;
166 putback = atomic_dec_and_test(&req->count); 172 list_del(&req->list);
173 req->state = FUSE_REQ_FINISHED;
167 spin_unlock(&fuse_lock); 174 spin_unlock(&fuse_lock);
168 if (req->background) { 175 if (req->background) {
169 down_read(&fc->sbput_sem); 176 down_read(&fc->sbput_sem);
@@ -172,28 +179,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
172 up_read(&fc->sbput_sem); 179 up_read(&fc->sbput_sem);
173 } 180 }
174 wake_up(&req->waitq); 181 wake_up(&req->waitq);
175 if (req->in.h.opcode == FUSE_INIT) { 182 if (end)
176 int i; 183 end(fc, req);
177 184 else
178 if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION) 185 fuse_put_request(fc, req);
179 fc->conn_error = 1;
180
181 /* After INIT reply is received other requests can go
182 out. So do (FUSE_MAX_OUTSTANDING - 1) number of
183 up()s on outstanding_sem. The last up() is done in
184 fuse_putback_request() */
185 for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
186 up(&fc->outstanding_sem);
187 } else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
188 /* Special case for failed iget in CREATE */
189 u64 nodeid = req->in.h.nodeid;
190 __fuse_get_request(req);
191 fuse_reset_request(req);
192 fuse_send_forget(fc, req, nodeid, 1);
193 putback = 0;
194 }
195 if (putback)
196 fuse_putback_request(fc, req);
197} 186}
198 187
199/* 188/*
@@ -244,14 +233,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
244 233
245 spin_unlock(&fuse_lock); 234 spin_unlock(&fuse_lock);
246 block_sigs(&oldset); 235 block_sigs(&oldset);
247 wait_event_interruptible(req->waitq, req->finished); 236 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
248 restore_sigs(&oldset); 237 restore_sigs(&oldset);
249 spin_lock(&fuse_lock); 238 spin_lock(&fuse_lock);
250 if (req->finished) 239 if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
251 return; 240 return;
252 241
253 req->out.h.error = -EINTR; 242 if (!req->interrupted) {
254 req->interrupted = 1; 243 req->out.h.error = -EINTR;
244 req->interrupted = 1;
245 }
255 if (req->locked) { 246 if (req->locked) {
256 /* This is uninterruptible sleep, because data is 247 /* This is uninterruptible sleep, because data is
257 being copied to/from the buffers of req. During 248 being copied to/from the buffers of req. During
@@ -262,10 +253,10 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
262 wait_event(req->waitq, !req->locked); 253 wait_event(req->waitq, !req->locked);
263 spin_lock(&fuse_lock); 254 spin_lock(&fuse_lock);
264 } 255 }
265 if (!req->sent && !list_empty(&req->list)) { 256 if (req->state == FUSE_REQ_PENDING) {
266 list_del(&req->list); 257 list_del(&req->list);
267 __fuse_put_request(req); 258 __fuse_put_request(req);
268 } else if (!req->finished && req->sent) 259 } else if (req->state == FUSE_REQ_SENT)
269 background_request(fc, req); 260 background_request(fc, req);
270} 261}
271 262
@@ -300,6 +291,7 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
300 fc->outstanding_debt++; 291 fc->outstanding_debt++;
301 } 292 }
302 list_add_tail(&req->list, &fc->pending); 293 list_add_tail(&req->list, &fc->pending);
294 req->state = FUSE_REQ_PENDING;
303 wake_up(&fc->waitq); 295 wake_up(&fc->waitq);
304} 296}
305 297
@@ -352,30 +344,12 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
352 request_send_nowait(fc, req); 344 request_send_nowait(fc, req);
353} 345}
354 346
355void fuse_send_init(struct fuse_conn *fc)
356{
357 /* This is called from fuse_read_super() so there's guaranteed
358 to be a request available */
359 struct fuse_req *req = do_get_request(fc);
360 struct fuse_init_in_out *arg = &req->misc.init_in_out;
361 arg->major = FUSE_KERNEL_VERSION;
362 arg->minor = FUSE_KERNEL_MINOR_VERSION;
363 req->in.h.opcode = FUSE_INIT;
364 req->in.numargs = 1;
365 req->in.args[0].size = sizeof(*arg);
366 req->in.args[0].value = arg;
367 req->out.numargs = 1;
368 req->out.args[0].size = sizeof(*arg);
369 req->out.args[0].value = arg;
370 request_send_background(fc, req);
371}
372
373/* 347/*
374 * Lock the request. Up to the next unlock_request() there mustn't be 348 * Lock the request. Up to the next unlock_request() there mustn't be
375 * anything that could cause a page-fault. If the request was already 349 * anything that could cause a page-fault. If the request was already
376 * interrupted bail out. 350 * interrupted bail out.
377 */ 351 */
378static inline int lock_request(struct fuse_req *req) 352static int lock_request(struct fuse_req *req)
379{ 353{
380 int err = 0; 354 int err = 0;
381 if (req) { 355 if (req) {
@@ -394,7 +368,7 @@ static inline int lock_request(struct fuse_req *req)
394 * requester thread is currently waiting for it to be unlocked, so 368 * requester thread is currently waiting for it to be unlocked, so
395 * wake it up. 369 * wake it up.
396 */ 370 */
397static inline void unlock_request(struct fuse_req *req) 371static void unlock_request(struct fuse_req *req)
398{ 372{
399 if (req) { 373 if (req) {
400 spin_lock(&fuse_lock); 374 spin_lock(&fuse_lock);
@@ -430,7 +404,7 @@ static void fuse_copy_init(struct fuse_copy_state *cs, int write,
430} 404}
431 405
432/* Unmap and put previous page of userspace buffer */ 406/* Unmap and put previous page of userspace buffer */
433static inline void fuse_copy_finish(struct fuse_copy_state *cs) 407static void fuse_copy_finish(struct fuse_copy_state *cs)
434{ 408{
435 if (cs->mapaddr) { 409 if (cs->mapaddr) {
436 kunmap_atomic(cs->mapaddr, KM_USER0); 410 kunmap_atomic(cs->mapaddr, KM_USER0);
@@ -479,8 +453,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
479} 453}
480 454
481/* Do as much copy to/from userspace buffer as we can */ 455/* Do as much copy to/from userspace buffer as we can */
482static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val, 456static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
483 unsigned *size)
484{ 457{
485 unsigned ncpy = min(*size, cs->len); 458 unsigned ncpy = min(*size, cs->len);
486 if (val) { 459 if (val) {
@@ -500,8 +473,8 @@ static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
500 * Copy a page in the request to/from the userspace buffer. Must be 473 * Copy a page in the request to/from the userspace buffer. Must be
501 * done atomically 474 * done atomically
502 */ 475 */
503static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page, 476static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
504 unsigned offset, unsigned count, int zeroing) 477 unsigned offset, unsigned count, int zeroing)
505{ 478{
506 if (page && zeroing && count < PAGE_SIZE) { 479 if (page && zeroing && count < PAGE_SIZE) {
507 void *mapaddr = kmap_atomic(page, KM_USER1); 480 void *mapaddr = kmap_atomic(page, KM_USER1);
@@ -583,7 +556,7 @@ static void request_wait(struct fuse_conn *fc)
583 DECLARE_WAITQUEUE(wait, current); 556 DECLARE_WAITQUEUE(wait, current);
584 557
585 add_wait_queue_exclusive(&fc->waitq, &wait); 558 add_wait_queue_exclusive(&fc->waitq, &wait);
586 while (fc->mounted && list_empty(&fc->pending)) { 559 while (fc->connected && list_empty(&fc->pending)) {
587 set_current_state(TASK_INTERRUPTIBLE); 560 set_current_state(TASK_INTERRUPTIBLE);
588 if (signal_pending(current)) 561 if (signal_pending(current))
589 break; 562 break;
@@ -615,6 +588,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
615 struct fuse_copy_state cs; 588 struct fuse_copy_state cs;
616 unsigned reqsize; 589 unsigned reqsize;
617 590
591 restart:
618 spin_lock(&fuse_lock); 592 spin_lock(&fuse_lock);
619 fc = file->private_data; 593 fc = file->private_data;
620 err = -EPERM; 594 err = -EPERM;
@@ -622,28 +596,34 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
622 goto err_unlock; 596 goto err_unlock;
623 request_wait(fc); 597 request_wait(fc);
624 err = -ENODEV; 598 err = -ENODEV;
625 if (!fc->mounted) 599 if (!fc->connected)
626 goto err_unlock; 600 goto err_unlock;
627 err = -ERESTARTSYS; 601 err = -ERESTARTSYS;
628 if (list_empty(&fc->pending)) 602 if (list_empty(&fc->pending))
629 goto err_unlock; 603 goto err_unlock;
630 604
631 req = list_entry(fc->pending.next, struct fuse_req, list); 605 req = list_entry(fc->pending.next, struct fuse_req, list);
632 list_del_init(&req->list); 606 req->state = FUSE_REQ_READING;
633 spin_unlock(&fuse_lock); 607 list_move(&req->list, &fc->io);
634 608
635 in = &req->in; 609 in = &req->in;
636 reqsize = req->in.h.len; 610 reqsize = in->h.len;
637 fuse_copy_init(&cs, 1, req, iov, nr_segs); 611 /* If request is too large, reply with an error and restart the read */
638 err = -EINVAL; 612 if (iov_length(iov, nr_segs) < reqsize) {
639 if (iov_length(iov, nr_segs) >= reqsize) { 613 req->out.h.error = -EIO;
640 err = fuse_copy_one(&cs, &in->h, sizeof(in->h)); 614 /* SETXATTR is special, since it may contain too large data */
641 if (!err) 615 if (in->h.opcode == FUSE_SETXATTR)
642 err = fuse_copy_args(&cs, in->numargs, in->argpages, 616 req->out.h.error = -E2BIG;
643 (struct fuse_arg *) in->args, 0); 617 request_end(fc, req);
618 goto restart;
644 } 619 }
620 spin_unlock(&fuse_lock);
621 fuse_copy_init(&cs, 1, req, iov, nr_segs);
622 err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
623 if (!err)
624 err = fuse_copy_args(&cs, in->numargs, in->argpages,
625 (struct fuse_arg *) in->args, 0);
645 fuse_copy_finish(&cs); 626 fuse_copy_finish(&cs);
646
647 spin_lock(&fuse_lock); 627 spin_lock(&fuse_lock);
648 req->locked = 0; 628 req->locked = 0;
649 if (!err && req->interrupted) 629 if (!err && req->interrupted)
@@ -657,8 +637,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
657 if (!req->isreply) 637 if (!req->isreply)
658 request_end(fc, req); 638 request_end(fc, req);
659 else { 639 else {
660 req->sent = 1; 640 req->state = FUSE_REQ_SENT;
661 list_add_tail(&req->list, &fc->processing); 641 list_move_tail(&req->list, &fc->processing);
662 spin_unlock(&fuse_lock); 642 spin_unlock(&fuse_lock);
663 } 643 }
664 return reqsize; 644 return reqsize;
@@ -746,17 +726,23 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
746 goto err_finish; 726 goto err_finish;
747 727
748 spin_lock(&fuse_lock); 728 spin_lock(&fuse_lock);
729 err = -ENOENT;
730 if (!fc->connected)
731 goto err_unlock;
732
749 req = request_find(fc, oh.unique); 733 req = request_find(fc, oh.unique);
750 err = -EINVAL; 734 err = -EINVAL;
751 if (!req) 735 if (!req)
752 goto err_unlock; 736 goto err_unlock;
753 737
754 list_del_init(&req->list);
755 if (req->interrupted) { 738 if (req->interrupted) {
756 request_end(fc, req); 739 spin_unlock(&fuse_lock);
757 fuse_copy_finish(&cs); 740 fuse_copy_finish(&cs);
741 spin_lock(&fuse_lock);
742 request_end(fc, req);
758 return -ENOENT; 743 return -ENOENT;
759 } 744 }
745 list_move(&req->list, &fc->io);
760 req->out.h = oh; 746 req->out.h = oh;
761 req->locked = 1; 747 req->locked = 1;
762 cs.req = req; 748 cs.req = req;
@@ -810,19 +796,90 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
810 return mask; 796 return mask;
811} 797}
812 798
813/* Abort all requests on the given list (pending or processing) */ 799/*
800 * Abort all requests on the given list (pending or processing)
801 *
802 * This function releases and reacquires fuse_lock
803 */
814static void end_requests(struct fuse_conn *fc, struct list_head *head) 804static void end_requests(struct fuse_conn *fc, struct list_head *head)
815{ 805{
816 while (!list_empty(head)) { 806 while (!list_empty(head)) {
817 struct fuse_req *req; 807 struct fuse_req *req;
818 req = list_entry(head->next, struct fuse_req, list); 808 req = list_entry(head->next, struct fuse_req, list);
819 list_del_init(&req->list);
820 req->out.h.error = -ECONNABORTED; 809 req->out.h.error = -ECONNABORTED;
821 request_end(fc, req); 810 request_end(fc, req);
822 spin_lock(&fuse_lock); 811 spin_lock(&fuse_lock);
823 } 812 }
824} 813}
825 814
815/*
816 * Abort requests under I/O
817 *
818 * The requests are set to interrupted and finished, and the request
819 * waiter is woken up. This will make request_wait_answer() wait
820 * until the request is unlocked and then return.
821 *
822 * If the request is asynchronous, then the end function needs to be
823 * called after waiting for the request to be unlocked (if it was
824 * locked).
825 */
826static void end_io_requests(struct fuse_conn *fc)
827{
828 while (!list_empty(&fc->io)) {
829 struct fuse_req *req =
830 list_entry(fc->io.next, struct fuse_req, list);
831 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
832
833 req->interrupted = 1;
834 req->out.h.error = -ECONNABORTED;
835 req->state = FUSE_REQ_FINISHED;
836 list_del_init(&req->list);
837 wake_up(&req->waitq);
838 if (end) {
839 req->end = NULL;
840 /* The end function will consume this reference */
841 __fuse_get_request(req);
842 spin_unlock(&fuse_lock);
843 wait_event(req->waitq, !req->locked);
844 end(fc, req);
845 spin_lock(&fuse_lock);
846 }
847 }
848}
849
850/*
851 * Abort all requests.
852 *
853 * Emergency exit in case of a malicious or accidental deadlock, or
854 * just a hung filesystem.
855 *
856 * The same effect is usually achievable through killing the
857 * filesystem daemon and all users of the filesystem. The exception
858 * is the combination of an asynchronous request and the tricky
859 * deadlock (see Documentation/filesystems/fuse.txt).
860 *
861 * During the aborting, progression of requests from the pending and
862 * processing lists onto the io list, and progression of new requests
863 * onto the pending list is prevented by req->connected being false.
864 *
865 * Progression of requests under I/O to the processing list is
866 * prevented by the req->interrupted flag being true for these
867 * requests. For this reason requests on the io list must be aborted
868 * first.
869 */
870void fuse_abort_conn(struct fuse_conn *fc)
871{
872 spin_lock(&fuse_lock);
873 if (fc->connected) {
874 fc->connected = 0;
875 end_io_requests(fc);
876 end_requests(fc, &fc->pending);
877 end_requests(fc, &fc->processing);
878 wake_up_all(&fc->waitq);
879 }
880 spin_unlock(&fuse_lock);
881}
882
826static int fuse_dev_release(struct inode *inode, struct file *file) 883static int fuse_dev_release(struct inode *inode, struct file *file)
827{ 884{
828 struct fuse_conn *fc; 885 struct fuse_conn *fc;
@@ -833,9 +890,11 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
833 fc->connected = 0; 890 fc->connected = 0;
834 end_requests(fc, &fc->pending); 891 end_requests(fc, &fc->pending);
835 end_requests(fc, &fc->processing); 892 end_requests(fc, &fc->processing);
836 fuse_release_conn(fc);
837 } 893 }
838 spin_unlock(&fuse_lock); 894 spin_unlock(&fuse_lock);
895 if (fc)
896 kobject_put(&fc->kobj);
897
839 return 0; 898 return 0;
840} 899}
841 900
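
The new fuse_abort_conn() above is careful about ordering: requests on the io list are aborted first (their interrupted flag then keeps them from migrating back to the processing list), and any 'end' callback is invoked with fuse_lock dropped, after waiting for req->locked to clear. A compressed userspace sketch of that unlock, call, relock pattern, with a plain pthread mutex standing in for fuse_lock and invented names throughout:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ fuse_lock */

    struct areq {
            const char *name;
            void (*end)(struct areq *);
            int interrupted;
    };

    static void release_resources(struct areq *r)
    {
            printf("end callback runs without the lock: %s\n", r->name);
    }

    /* Mirrors the shape of end_io_requests(): mark the request aborted, then
     * run its completion callback outside the lock (the kernel additionally
     * waits for !req->locked before calling end()). */
    static void abort_req(struct areq *r)
    {
            void (*end)(struct areq *) = r->end;

            r->end = NULL;
            r->interrupted = 1;
            if (end) {
                    pthread_mutex_unlock(&big_lock);
                    end(r);
                    pthread_mutex_lock(&big_lock);
            }
    }

    int main(void)
    {
            struct areq io = { "request on fc->io", release_resources, 0 };
            struct areq pending = { "request on fc->pending", NULL, 0 };
            struct areq processing = { "request on fc->processing", NULL, 0 };

            pthread_mutex_lock(&big_lock);
            abort_req(&io);         /* io list first, see the comment in the patch */
            abort_req(&pending);
            abort_req(&processing);
            pthread_mutex_unlock(&big_lock);
            return 0;
    }

(Build with -pthread; this only illustrates the locking shape, not the wait on req->locked.)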
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index c045cc70c749..21fd59c7bc24 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -13,15 +13,66 @@
13#include <linux/gfp.h> 13#include <linux/gfp.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/namei.h> 15#include <linux/namei.h>
16#include <linux/mount.h>
17 16
18static inline unsigned long time_to_jiffies(unsigned long sec, 17/*
19 unsigned long nsec) 18 * FUSE caches dentries and attributes with separate timeout. The
19 * time in jiffies until the dentry/attributes are valid is stored in
20 * dentry->d_time and fuse_inode->i_time respectively.
21 */
22
23/*
24 * Calculate the time in jiffies until a dentry/attributes are valid
25 */
26static unsigned long time_to_jiffies(unsigned long sec, unsigned long nsec)
20{ 27{
21 struct timespec ts = {sec, nsec}; 28 struct timespec ts = {sec, nsec};
22 return jiffies + timespec_to_jiffies(&ts); 29 return jiffies + timespec_to_jiffies(&ts);
23} 30}
24 31
32/*
33 * Set dentry and possibly attribute timeouts from the lookup/mk*
34 * replies
35 */
36static void fuse_change_timeout(struct dentry *entry, struct fuse_entry_out *o)
37{
38 entry->d_time = time_to_jiffies(o->entry_valid, o->entry_valid_nsec);
39 if (entry->d_inode)
40 get_fuse_inode(entry->d_inode)->i_time =
41 time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
42}
43
44/*
45 * Mark the attributes as stale, so that at the next call to
46 * ->getattr() they will be fetched from userspace
47 */
48void fuse_invalidate_attr(struct inode *inode)
49{
50 get_fuse_inode(inode)->i_time = jiffies - 1;
51}
52
53/*
54 * Just mark the entry as stale, so that a next attempt to look it up
55 * will result in a new lookup call to userspace
56 *
57 * This is called when a dentry is about to become negative and the
58 * timeout is unknown (unlink, rmdir, rename and in some cases
59 * lookup)
60 */
61static void fuse_invalidate_entry_cache(struct dentry *entry)
62{
63 entry->d_time = jiffies - 1;
64}
65
66/*
67 * Same as fuse_invalidate_entry_cache(), but also try to remove the
68 * dentry from the hash
69 */
70static void fuse_invalidate_entry(struct dentry *entry)
71{
72 d_invalidate(entry);
73 fuse_invalidate_entry_cache(entry);
74}
75
25static void fuse_lookup_init(struct fuse_req *req, struct inode *dir, 76static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
26 struct dentry *entry, 77 struct dentry *entry,
27 struct fuse_entry_out *outarg) 78 struct fuse_entry_out *outarg)
@@ -37,17 +88,34 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
37 req->out.args[0].value = outarg; 88 req->out.args[0].value = outarg;
38} 89}
39 90
91/*
92 * Check whether the dentry is still valid
93 *
94 * If the entry validity timeout has expired and the dentry is
95 * positive, try to redo the lookup. If the lookup results in a
96 * different inode, then let the VFS invalidate the dentry and redo
97 * the lookup once more. If the lookup results in the same inode,
98 * then refresh the attributes, timeouts and mark the dentry valid.
99 */
40static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) 100static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
41{ 101{
42 if (!entry->d_inode || is_bad_inode(entry->d_inode)) 102 struct inode *inode = entry->d_inode;
103
104 if (inode && is_bad_inode(inode))
43 return 0; 105 return 0;
44 else if (time_after(jiffies, entry->d_time)) { 106 else if (time_after(jiffies, entry->d_time)) {
45 int err; 107 int err;
46 struct fuse_entry_out outarg; 108 struct fuse_entry_out outarg;
47 struct inode *inode = entry->d_inode; 109 struct fuse_conn *fc;
48 struct fuse_inode *fi = get_fuse_inode(inode); 110 struct fuse_req *req;
49 struct fuse_conn *fc = get_fuse_conn(inode); 111
50 struct fuse_req *req = fuse_get_request(fc); 112 /* Doesn't hurt to "reset" the validity timeout */
113 fuse_invalidate_entry_cache(entry);
114 if (!inode)
115 return 0;
116
117 fc = get_fuse_conn(inode);
118 req = fuse_get_request(fc);
51 if (!req) 119 if (!req)
52 return 0; 120 return 0;
53 121
@@ -55,6 +123,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
55 request_send(fc, req); 123 request_send(fc, req);
56 err = req->out.h.error; 124 err = req->out.h.error;
57 if (!err) { 125 if (!err) {
126 struct fuse_inode *fi = get_fuse_inode(inode);
58 if (outarg.nodeid != get_node_id(inode)) { 127 if (outarg.nodeid != get_node_id(inode)) {
59 fuse_send_forget(fc, req, outarg.nodeid, 1); 128 fuse_send_forget(fc, req, outarg.nodeid, 1);
60 return 0; 129 return 0;
@@ -66,20 +135,44 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
66 return 0; 135 return 0;
67 136
68 fuse_change_attributes(inode, &outarg.attr); 137 fuse_change_attributes(inode, &outarg.attr);
69 entry->d_time = time_to_jiffies(outarg.entry_valid, 138 fuse_change_timeout(entry, &outarg);
70 outarg.entry_valid_nsec);
71 fi->i_time = time_to_jiffies(outarg.attr_valid,
72 outarg.attr_valid_nsec);
73 } 139 }
74 return 1; 140 return 1;
75} 141}
76 142
143/*
144 * Check if there's already a hashed alias of this directory inode.
145 * If yes, then lookup and mkdir must not create a new alias.
146 */
147static int dir_alias(struct inode *inode)
148{
149 if (S_ISDIR(inode->i_mode)) {
150 struct dentry *alias = d_find_alias(inode);
151 if (alias) {
152 dput(alias);
153 return 1;
154 }
155 }
156 return 0;
157}
158
159static int invalid_nodeid(u64 nodeid)
160{
161 return !nodeid || nodeid == FUSE_ROOT_ID;
162}
163
77static struct dentry_operations fuse_dentry_operations = { 164static struct dentry_operations fuse_dentry_operations = {
78 .d_revalidate = fuse_dentry_revalidate, 165 .d_revalidate = fuse_dentry_revalidate,
79}; 166};
80 167
81static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, 168static int valid_mode(int m)
82 struct inode **inodep) 169{
170 return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) ||
171 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
172}
173
174static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
175 struct nameidata *nd)
83{ 176{
84 int err; 177 int err;
85 struct fuse_entry_out outarg; 178 struct fuse_entry_out outarg;
@@ -88,53 +181,49 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry,
88 struct fuse_req *req; 181 struct fuse_req *req;
89 182
90 if (entry->d_name.len > FUSE_NAME_MAX) 183 if (entry->d_name.len > FUSE_NAME_MAX)
91 return -ENAMETOOLONG; 184 return ERR_PTR(-ENAMETOOLONG);
92 185
93 req = fuse_get_request(fc); 186 req = fuse_get_request(fc);
94 if (!req) 187 if (!req)
95 return -EINTR; 188 return ERR_PTR(-EINTR);
96 189
97 fuse_lookup_init(req, dir, entry, &outarg); 190 fuse_lookup_init(req, dir, entry, &outarg);
98 request_send(fc, req); 191 request_send(fc, req);
99 err = req->out.h.error; 192 err = req->out.h.error;
100 if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID)) 193 if (!err && ((outarg.nodeid && invalid_nodeid(outarg.nodeid)) ||
194 !valid_mode(outarg.attr.mode)))
101 err = -EIO; 195 err = -EIO;
102 if (!err) { 196 if (!err && outarg.nodeid) {
103 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 197 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
104 &outarg.attr); 198 &outarg.attr);
105 if (!inode) { 199 if (!inode) {
106 fuse_send_forget(fc, req, outarg.nodeid, 1); 200 fuse_send_forget(fc, req, outarg.nodeid, 1);
107 return -ENOMEM; 201 return ERR_PTR(-ENOMEM);
108 } 202 }
109 } 203 }
110 fuse_put_request(fc, req); 204 fuse_put_request(fc, req);
111 if (err && err != -ENOENT) 205 if (err && err != -ENOENT)
112 return err; 206 return ERR_PTR(err);
113 207
114 if (inode) { 208 if (inode && dir_alias(inode)) {
115 struct fuse_inode *fi = get_fuse_inode(inode); 209 iput(inode);
116 entry->d_time = time_to_jiffies(outarg.entry_valid, 210 return ERR_PTR(-EIO);
117 outarg.entry_valid_nsec);
118 fi->i_time = time_to_jiffies(outarg.attr_valid,
119 outarg.attr_valid_nsec);
120 } 211 }
121 212 d_add(entry, inode);
122 entry->d_op = &fuse_dentry_operations; 213 entry->d_op = &fuse_dentry_operations;
123 *inodep = inode; 214 if (!err)
124 return 0; 215 fuse_change_timeout(entry, &outarg);
125} 216 else
126 217 fuse_invalidate_entry_cache(entry);
127void fuse_invalidate_attr(struct inode *inode) 218 return NULL;
128{
129 get_fuse_inode(inode)->i_time = jiffies - 1;
130}
131
132static void fuse_invalidate_entry(struct dentry *entry)
133{
134 d_invalidate(entry);
135 entry->d_time = jiffies - 1;
136} 219}
137 220
221/*
222 * Atomic create+open operation
223 *
224 * If the filesystem doesn't support this, then fall back to separate
225 * 'mknod' + 'open' requests.
226 */
138static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, 227static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
139 struct nameidata *nd) 228 struct nameidata *nd)
140{ 229{
@@ -145,7 +234,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
145 struct fuse_open_in inarg; 234 struct fuse_open_in inarg;
146 struct fuse_open_out outopen; 235 struct fuse_open_out outopen;
147 struct fuse_entry_out outentry; 236 struct fuse_entry_out outentry;
148 struct fuse_inode *fi;
149 struct fuse_file *ff; 237 struct fuse_file *ff;
150 struct file *file; 238 struct file *file;
151 int flags = nd->intent.open.flags - 1; 239 int flags = nd->intent.open.flags - 1;
@@ -154,10 +242,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
154 if (fc->no_create) 242 if (fc->no_create)
155 goto out; 243 goto out;
156 244
157 err = -ENAMETOOLONG;
158 if (entry->d_name.len > FUSE_NAME_MAX)
159 goto out;
160
161 err = -EINTR; 245 err = -EINTR;
162 req = fuse_get_request(fc); 246 req = fuse_get_request(fc);
163 if (!req) 247 if (!req)
@@ -193,7 +277,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
193 } 277 }
194 278
195 err = -EIO; 279 err = -EIO;
196 if (!S_ISREG(outentry.attr.mode)) 280 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
197 goto out_free_ff; 281 goto out_free_ff;
198 282
199 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, 283 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
@@ -202,17 +286,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
202 if (!inode) { 286 if (!inode) {
203 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 287 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
204 ff->fh = outopen.fh; 288 ff->fh = outopen.fh;
289 /* Special release, with inode = NULL, this will
290 trigger a 'forget' request when the release is
291 complete */
205 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0); 292 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
206 goto out_put_request; 293 goto out_put_request;
207 } 294 }
208 fuse_put_request(fc, req); 295 fuse_put_request(fc, req);
209 entry->d_time = time_to_jiffies(outentry.entry_valid,
210 outentry.entry_valid_nsec);
211 fi = get_fuse_inode(inode);
212 fi->i_time = time_to_jiffies(outentry.attr_valid,
213 outentry.attr_valid_nsec);
214
215 d_instantiate(entry, inode); 296 d_instantiate(entry, inode);
297 fuse_change_timeout(entry, &outentry);
216 file = lookup_instantiate_filp(nd, entry, generic_file_open); 298 file = lookup_instantiate_filp(nd, entry, generic_file_open);
217 if (IS_ERR(file)) { 299 if (IS_ERR(file)) {
218 ff->fh = outopen.fh; 300 ff->fh = outopen.fh;
@@ -230,13 +312,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
230 return err; 312 return err;
231} 313}
232 314
315/*
316 * Code shared between mknod, mkdir, symlink and link
317 */
233static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, 318static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
234 struct inode *dir, struct dentry *entry, 319 struct inode *dir, struct dentry *entry,
235 int mode) 320 int mode)
236{ 321{
237 struct fuse_entry_out outarg; 322 struct fuse_entry_out outarg;
238 struct inode *inode; 323 struct inode *inode;
239 struct fuse_inode *fi;
240 int err; 324 int err;
241 325
242 req->in.h.nodeid = get_node_id(dir); 326 req->in.h.nodeid = get_node_id(dir);
@@ -250,10 +334,13 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
250 fuse_put_request(fc, req); 334 fuse_put_request(fc, req);
251 return err; 335 return err;
252 } 336 }
253 if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) { 337 err = -EIO;
254 fuse_put_request(fc, req); 338 if (invalid_nodeid(outarg.nodeid))
255 return -EIO; 339 goto out_put_request;
256 } 340
341 if ((outarg.attr.mode ^ mode) & S_IFMT)
342 goto out_put_request;
343
257 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, 344 inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
258 &outarg.attr); 345 &outarg.attr);
259 if (!inode) { 346 if (!inode) {
@@ -262,22 +349,19 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
262 } 349 }
263 fuse_put_request(fc, req); 350 fuse_put_request(fc, req);
264 351
265 /* Don't allow userspace to do really stupid things... */ 352 if (dir_alias(inode)) {
266 if ((inode->i_mode ^ mode) & S_IFMT) {
267 iput(inode); 353 iput(inode);
268 return -EIO; 354 return -EIO;
269 } 355 }
270 356
271 entry->d_time = time_to_jiffies(outarg.entry_valid,
272 outarg.entry_valid_nsec);
273
274 fi = get_fuse_inode(inode);
275 fi->i_time = time_to_jiffies(outarg.attr_valid,
276 outarg.attr_valid_nsec);
277
278 d_instantiate(entry, inode); 357 d_instantiate(entry, inode);
358 fuse_change_timeout(entry, &outarg);
279 fuse_invalidate_attr(dir); 359 fuse_invalidate_attr(dir);
280 return 0; 360 return 0;
361
362 out_put_request:
363 fuse_put_request(fc, req);
364 return err;
281} 365}
282 366
283static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, 367static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode,
@@ -337,12 +421,7 @@ static int fuse_symlink(struct inode *dir, struct dentry *entry,
337{ 421{
338 struct fuse_conn *fc = get_fuse_conn(dir); 422 struct fuse_conn *fc = get_fuse_conn(dir);
339 unsigned len = strlen(link) + 1; 423 unsigned len = strlen(link) + 1;
340 struct fuse_req *req; 424 struct fuse_req *req = fuse_get_request(fc);
341
342 if (len > FUSE_SYMLINK_MAX)
343 return -ENAMETOOLONG;
344
345 req = fuse_get_request(fc);
346 if (!req) 425 if (!req)
347 return -EINTR; 426 return -EINTR;
348 427
@@ -381,6 +460,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
381 inode->i_nlink = 0; 460 inode->i_nlink = 0;
382 fuse_invalidate_attr(inode); 461 fuse_invalidate_attr(inode);
383 fuse_invalidate_attr(dir); 462 fuse_invalidate_attr(dir);
463 fuse_invalidate_entry_cache(entry);
384 } else if (err == -EINTR) 464 } else if (err == -EINTR)
385 fuse_invalidate_entry(entry); 465 fuse_invalidate_entry(entry);
386 return err; 466 return err;
@@ -406,6 +486,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
406 if (!err) { 486 if (!err) {
407 entry->d_inode->i_nlink = 0; 487 entry->d_inode->i_nlink = 0;
408 fuse_invalidate_attr(dir); 488 fuse_invalidate_attr(dir);
489 fuse_invalidate_entry_cache(entry);
409 } else if (err == -EINTR) 490 } else if (err == -EINTR)
410 fuse_invalidate_entry(entry); 491 fuse_invalidate_entry(entry);
411 return err; 492 return err;
@@ -441,6 +522,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
441 fuse_invalidate_attr(olddir); 522 fuse_invalidate_attr(olddir);
442 if (olddir != newdir) 523 if (olddir != newdir)
443 fuse_invalidate_attr(newdir); 524 fuse_invalidate_attr(newdir);
525
526 /* newent will end up negative */
527 if (newent->d_inode)
528 fuse_invalidate_entry_cache(newent);
444 } else if (err == -EINTR) { 529 } else if (err == -EINTR) {
445 /* If request was interrupted, DEITY only knows if the 530 /* If request was interrupted, DEITY only knows if the
446 rename actually took place. If the invalidation 531 rename actually took place. If the invalidation
@@ -548,6 +633,15 @@ static int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task)
548 return 0; 633 return 0;
549} 634}
550 635
636/*
637 * Check whether the inode attributes are still valid
638 *
639 * If the attribute validity timeout has expired, then fetch the fresh
640 * attributes with a 'getattr' request
641 *
642 * I'm not sure why cached attributes are never returned for the root
643 * inode, this is probably being too cautious.
644 */
551static int fuse_revalidate(struct dentry *entry) 645static int fuse_revalidate(struct dentry *entry)
552{ 646{
553 struct inode *inode = entry->d_inode; 647 struct inode *inode = entry->d_inode;
@@ -595,6 +689,19 @@ static int fuse_access(struct inode *inode, int mask)
595 return err; 689 return err;
596} 690}
597 691
692/*
693 * Check permission. The two basic access models of FUSE are:
694 *
695 * 1) Local access checking ('default_permissions' mount option) based
696 * on file mode. This is the plain old disk filesystem permission
697 * modell.
698 *
699 * 2) "Remote" access checking, where server is responsible for
700 * checking permission in each inode operation. An exception to this
701 * is if ->permission() was invoked from sys_access() in which case an
702 * access request is sent. Execute permission is still checked
703 * locally based on file mode.
704 */
598static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd) 705static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
599{ 706{
600 struct fuse_conn *fc = get_fuse_conn(inode); 707 struct fuse_conn *fc = get_fuse_conn(inode);
@@ -613,14 +720,10 @@ static int fuse_permission(struct inode *inode, int mask, struct nameidata *nd)
613 err = generic_permission(inode, mask, NULL); 720 err = generic_permission(inode, mask, NULL);
614 } 721 }
615 722
616 /* FIXME: Need some mechanism to revoke permissions: 723 /* Note: the opposite of the above test does not
617 currently if the filesystem suddenly changes the 724 exist. So if permissions are revoked this won't be
618 file mode, we will not be informed about it, and 725 noticed immediately, only after the attribute
619 continue to allow access to the file/directory. 726 timeout has expired */
620
621 This is actually not so grave, since the user can
622 simply keep access to the file/directory anyway by
623 keeping it open... */
624 727
625 return err; 728 return err;
626 } else { 729 } else {
@@ -659,13 +762,6 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
659 return 0; 762 return 0;
660} 763}
661 764
662static inline size_t fuse_send_readdir(struct fuse_req *req, struct file *file,
663 struct inode *inode, loff_t pos,
664 size_t count)
665{
666 return fuse_send_read_common(req, file, inode, pos, count, 1);
667}
668
669static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) 765static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
670{ 766{
671 int err; 767 int err;
@@ -673,7 +769,12 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
673 struct page *page; 769 struct page *page;
674 struct inode *inode = file->f_dentry->d_inode; 770 struct inode *inode = file->f_dentry->d_inode;
675 struct fuse_conn *fc = get_fuse_conn(inode); 771 struct fuse_conn *fc = get_fuse_conn(inode);
676 struct fuse_req *req = fuse_get_request(fc); 772 struct fuse_req *req;
773
774 if (is_bad_inode(inode))
775 return -EIO;
776
777 req = fuse_get_request(fc);
677 if (!req) 778 if (!req)
678 return -EINTR; 779 return -EINTR;
679 780
@@ -684,7 +785,9 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
684 } 785 }
685 req->num_pages = 1; 786 req->num_pages = 1;
686 req->pages[0] = page; 787 req->pages[0] = page;
687 nbytes = fuse_send_readdir(req, file, inode, file->f_pos, PAGE_SIZE); 788 fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR);
789 request_send(fc, req);
790 nbytes = req->out.args[0].size;
688 err = req->out.h.error; 791 err = req->out.h.error;
689 fuse_put_request(fc, req); 792 fuse_put_request(fc, req);
690 if (!err) 793 if (!err)
@@ -788,6 +891,15 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
788 } 891 }
789} 892}
790 893
894/*
895 * Set attributes, and at the same time refresh them.
896 *
897 * Truncation is slightly complicated, because the 'truncate' request
898 * may fail, in which case we don't want to touch the mapping.
899 * vmtruncate() doesn't allow for this case. So do the rlimit
900 * checking by hand and call vmtruncate() only after the file has
901 * actually been truncated.
902 */
791static int fuse_setattr(struct dentry *entry, struct iattr *attr) 903static int fuse_setattr(struct dentry *entry, struct iattr *attr)
792{ 904{
793 struct inode *inode = entry->d_inode; 905 struct inode *inode = entry->d_inode;
@@ -865,28 +977,6 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
865 return err; 977 return err;
866} 978}
867 979
868static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
869 struct nameidata *nd)
870{
871 struct inode *inode;
872 int err;
873
874 err = fuse_lookup_iget(dir, entry, &inode);
875 if (err)
876 return ERR_PTR(err);
877 if (inode && S_ISDIR(inode->i_mode)) {
878 /* Don't allow creating an alias to a directory */
879 struct dentry *alias = d_find_alias(inode);
880 if (alias) {
881 dput(alias);
882 iput(inode);
883 return ERR_PTR(-EIO);
884 }
885 }
886 d_add(entry, inode);
887 return NULL;
888}
889
890static int fuse_setxattr(struct dentry *entry, const char *name, 980static int fuse_setxattr(struct dentry *entry, const char *name,
891 const void *value, size_t size, int flags) 981 const void *value, size_t size, int flags)
892{ 982{
@@ -896,9 +986,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
896 struct fuse_setxattr_in inarg; 986 struct fuse_setxattr_in inarg;
897 int err; 987 int err;
898 988
899 if (size > FUSE_XATTR_SIZE_MAX)
900 return -E2BIG;
901
902 if (fc->no_setxattr) 989 if (fc->no_setxattr)
903 return -EOPNOTSUPP; 990 return -EOPNOTSUPP;
904 991
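
The dir.c side mostly centralises the dentry and attribute timeout handling: fuse_change_timeout() converts the (sec, nsec) validity intervals from a lookup reply into absolute expiries stored in d_time and fuse_inode->i_time, and fuse_invalidate_entry_cache() simply pushes the expiry into the past. A rough userspace analogue, using CLOCK_MONOTONIC where the kernel uses jiffies (helper names are invented):

    #include <stdio.h>
    #include <time.h>

    /* Convert a (sec, nsec) validity interval from a lookup reply into an
     * absolute expiry, loosely mirroring time_to_jiffies() in the patch. */
    static struct timespec expiry_from_now(long sec, long nsec)
    {
            struct timespec now, exp;

            clock_gettime(CLOCK_MONOTONIC, &now);
            exp.tv_sec = now.tv_sec + sec + (now.tv_nsec + nsec) / 1000000000L;
            exp.tv_nsec = (now.tv_nsec + nsec) % 1000000000L;
            return exp;
    }

    /* fuse_invalidate_entry_cache() analogue: force the expiry into the past */
    static void invalidate(struct timespec *exp)
    {
            clock_gettime(CLOCK_MONOTONIC, exp);
            exp->tv_sec -= 1;
    }

    static int still_valid(const struct timespec *exp)
    {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            return now.tv_sec < exp->tv_sec ||
                   (now.tv_sec == exp->tv_sec && now.tv_nsec < exp->tv_nsec);
    }

    int main(void)
    {
            /* entry_valid / entry_valid_nsec as they would arrive in fuse_entry_out */
            struct timespec d_time = expiry_from_now(1, 500000000L);

            printf("after lookup:       %s\n", still_valid(&d_time) ? "valid" : "stale");
            invalidate(&d_time);
            printf("after invalidation: %s\n", still_valid(&d_time) ? "valid" : "stale");
            return 0;
    }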
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 2ca86141d13a..a7ef5e716f3c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -113,6 +113,14 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
113 return err; 113 return err;
114} 114}
115 115
116/* Special case for failed iget in CREATE */
117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
118{
119 u64 nodeid = req->in.h.nodeid;
120 fuse_reset_request(req);
121 fuse_send_forget(fc, req, nodeid, 1);
122}
123
116void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff, 124void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
117 u64 nodeid, struct inode *inode, int flags, int isdir) 125 u64 nodeid, struct inode *inode, int flags, int isdir)
118{ 126{
@@ -128,6 +136,8 @@ void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
128 req->in.args[0].size = sizeof(struct fuse_release_in); 136 req->in.args[0].size = sizeof(struct fuse_release_in);
129 req->in.args[0].value = inarg; 137 req->in.args[0].value = inarg;
130 request_send_background(fc, req); 138 request_send_background(fc, req);
139 if (!inode)
140 req->end = fuse_release_end;
131 kfree(ff); 141 kfree(ff);
132} 142}
133 143
@@ -163,6 +173,9 @@ static int fuse_flush(struct file *file)
163 struct fuse_flush_in inarg; 173 struct fuse_flush_in inarg;
164 int err; 174 int err;
165 175
176 if (is_bad_inode(inode))
177 return -EIO;
178
166 if (fc->no_flush) 179 if (fc->no_flush)
167 return 0; 180 return 0;
168 181
@@ -199,6 +212,9 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
199 struct fuse_fsync_in inarg; 212 struct fuse_fsync_in inarg;
200 int err; 213 int err;
201 214
215 if (is_bad_inode(inode))
216 return -EIO;
217
202 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) 218 if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
203 return 0; 219 return 0;
204 220
@@ -234,54 +250,57 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
234 return fuse_fsync_common(file, de, datasync, 0); 250 return fuse_fsync_common(file, de, datasync, 0);
235} 251}
236 252
237size_t fuse_send_read_common(struct fuse_req *req, struct file *file, 253void fuse_read_fill(struct fuse_req *req, struct file *file,
238 struct inode *inode, loff_t pos, size_t count, 254 struct inode *inode, loff_t pos, size_t count, int opcode)
239 int isdir)
240{ 255{
241 struct fuse_conn *fc = get_fuse_conn(inode);
242 struct fuse_file *ff = file->private_data; 256 struct fuse_file *ff = file->private_data;
243 struct fuse_read_in inarg; 257 struct fuse_read_in *inarg = &req->misc.read_in;
244 258
245 memset(&inarg, 0, sizeof(struct fuse_read_in)); 259 inarg->fh = ff->fh;
246 inarg.fh = ff->fh; 260 inarg->offset = pos;
247 inarg.offset = pos; 261 inarg->size = count;
248 inarg.size = count; 262 req->in.h.opcode = opcode;
249 req->in.h.opcode = isdir ? FUSE_READDIR : FUSE_READ;
250 req->in.h.nodeid = get_node_id(inode); 263 req->in.h.nodeid = get_node_id(inode);
251 req->inode = inode; 264 req->inode = inode;
252 req->file = file; 265 req->file = file;
253 req->in.numargs = 1; 266 req->in.numargs = 1;
254 req->in.args[0].size = sizeof(struct fuse_read_in); 267 req->in.args[0].size = sizeof(struct fuse_read_in);
255 req->in.args[0].value = &inarg; 268 req->in.args[0].value = inarg;
256 req->out.argpages = 1; 269 req->out.argpages = 1;
257 req->out.argvar = 1; 270 req->out.argvar = 1;
258 req->out.numargs = 1; 271 req->out.numargs = 1;
259 req->out.args[0].size = count; 272 req->out.args[0].size = count;
260 request_send(fc, req);
261 return req->out.args[0].size;
262} 273}
263 274
264static inline size_t fuse_send_read(struct fuse_req *req, struct file *file, 275static size_t fuse_send_read(struct fuse_req *req, struct file *file,
265 struct inode *inode, loff_t pos, 276 struct inode *inode, loff_t pos, size_t count)
266 size_t count)
267{ 277{
268 return fuse_send_read_common(req, file, inode, pos, count, 0); 278 struct fuse_conn *fc = get_fuse_conn(inode);
279 fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
280 request_send(fc, req);
281 return req->out.args[0].size;
269} 282}
270 283
271static int fuse_readpage(struct file *file, struct page *page) 284static int fuse_readpage(struct file *file, struct page *page)
272{ 285{
273 struct inode *inode = page->mapping->host; 286 struct inode *inode = page->mapping->host;
274 struct fuse_conn *fc = get_fuse_conn(inode); 287 struct fuse_conn *fc = get_fuse_conn(inode);
275 loff_t pos = (loff_t) page->index << PAGE_CACHE_SHIFT; 288 struct fuse_req *req;
276 struct fuse_req *req = fuse_get_request(fc); 289 int err;
277 int err = -EINTR; 290
291 err = -EIO;
292 if (is_bad_inode(inode))
293 goto out;
294
295 err = -EINTR;
296 req = fuse_get_request(fc);
278 if (!req) 297 if (!req)
279 goto out; 298 goto out;
280 299
281 req->out.page_zeroing = 1; 300 req->out.page_zeroing = 1;
282 req->num_pages = 1; 301 req->num_pages = 1;
283 req->pages[0] = page; 302 req->pages[0] = page;
284 fuse_send_read(req, file, inode, pos, PAGE_CACHE_SIZE); 303 fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE);
285 err = req->out.h.error; 304 err = req->out.h.error;
286 fuse_put_request(fc, req); 305 fuse_put_request(fc, req);
287 if (!err) 306 if (!err)
@@ -292,21 +311,33 @@ static int fuse_readpage(struct file *file, struct page *page)
292 return err; 311 return err;
293} 312}
294 313
295static int fuse_send_readpages(struct fuse_req *req, struct file *file, 314static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
296 struct inode *inode)
297{ 315{
298 loff_t pos = (loff_t) req->pages[0]->index << PAGE_CACHE_SHIFT; 316 int i;
299 size_t count = req->num_pages << PAGE_CACHE_SHIFT; 317
300 unsigned i; 318 fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */
301 req->out.page_zeroing = 1; 319
302 fuse_send_read(req, file, inode, pos, count);
303 for (i = 0; i < req->num_pages; i++) { 320 for (i = 0; i < req->num_pages; i++) {
304 struct page *page = req->pages[i]; 321 struct page *page = req->pages[i];
305 if (!req->out.h.error) 322 if (!req->out.h.error)
306 SetPageUptodate(page); 323 SetPageUptodate(page);
324 else
325 SetPageError(page);
307 unlock_page(page); 326 unlock_page(page);
308 } 327 }
309 return req->out.h.error; 328 fuse_put_request(fc, req);
329}
330
331static void fuse_send_readpages(struct fuse_req *req, struct file *file,
332 struct inode *inode)
333{
334 struct fuse_conn *fc = get_fuse_conn(inode);
335 loff_t pos = page_offset(req->pages[0]);
336 size_t count = req->num_pages << PAGE_CACHE_SHIFT;
337 req->out.page_zeroing = 1;
338 req->end = fuse_readpages_end;
339 fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
340 request_send_background(fc, req);
310} 341}
311 342
312struct fuse_readpages_data { 343struct fuse_readpages_data {
@@ -326,12 +357,12 @@ static int fuse_readpages_fill(void *_data, struct page *page)
326 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || 357 (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
327 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || 358 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
328 req->pages[req->num_pages - 1]->index + 1 != page->index)) { 359 req->pages[req->num_pages - 1]->index + 1 != page->index)) {
329 int err = fuse_send_readpages(req, data->file, inode); 360 fuse_send_readpages(req, data->file, inode);
330 if (err) { 361 data->req = req = fuse_get_request(fc);
362 if (!req) {
331 unlock_page(page); 363 unlock_page(page);
332 return err; 364 return -EINTR;
333 } 365 }
334 fuse_reset_request(req);
335 } 366 }
336 req->pages[req->num_pages] = page; 367 req->pages[req->num_pages] = page;
337 req->num_pages ++; 368 req->num_pages ++;
@@ -345,6 +376,10 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
345 struct fuse_conn *fc = get_fuse_conn(inode); 376 struct fuse_conn *fc = get_fuse_conn(inode);
346 struct fuse_readpages_data data; 377 struct fuse_readpages_data data;
347 int err; 378 int err;
379
380 if (is_bad_inode(inode))
381 return -EIO;
382
348 data.file = file; 383 data.file = file;
349 data.inode = inode; 384 data.inode = inode;
350 data.req = fuse_get_request(fc); 385 data.req = fuse_get_request(fc);
@@ -352,10 +387,8 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
352 return -EINTR; 387 return -EINTR;
353 388
354 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); 389 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
355 if (!err && data.req->num_pages) 390 if (!err)
356 err = fuse_send_readpages(data.req, file, inode); 391 fuse_send_readpages(data.req, file, inode);
357 fuse_put_request(fc, data.req);
358 fuse_invalidate_attr(inode); /* atime changed */
359 return err; 392 return err;
360} 393}
361 394
@@ -402,8 +435,13 @@ static int fuse_commit_write(struct file *file, struct page *page,
402 unsigned count = to - offset; 435 unsigned count = to - offset;
403 struct inode *inode = page->mapping->host; 436 struct inode *inode = page->mapping->host;
404 struct fuse_conn *fc = get_fuse_conn(inode); 437 struct fuse_conn *fc = get_fuse_conn(inode);
405 loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + offset; 438 loff_t pos = page_offset(page) + offset;
406 struct fuse_req *req = fuse_get_request(fc); 439 struct fuse_req *req;
440
441 if (is_bad_inode(inode))
442 return -EIO;
443
444 req = fuse_get_request(fc);
407 if (!req) 445 if (!req)
408 return -EINTR; 446 return -EINTR;
409 447
@@ -454,7 +492,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
454 492
455 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); 493 nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
456 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; 494 npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
457 npages = min(npages, FUSE_MAX_PAGES_PER_REQ); 495 npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
458 down_read(&current->mm->mmap_sem); 496 down_read(&current->mm->mmap_sem);
459 npages = get_user_pages(current, current->mm, user_addr, npages, write, 497 npages = get_user_pages(current, current->mm, user_addr, npages, write,
460 0, req->pages, NULL); 498 0, req->pages, NULL);
@@ -475,12 +513,16 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
475 size_t nmax = write ? fc->max_write : fc->max_read; 513 size_t nmax = write ? fc->max_write : fc->max_read;
476 loff_t pos = *ppos; 514 loff_t pos = *ppos;
477 ssize_t res = 0; 515 ssize_t res = 0;
478 struct fuse_req *req = fuse_get_request(fc); 516 struct fuse_req *req;
517
518 if (is_bad_inode(inode))
519 return -EIO;
520
521 req = fuse_get_request(fc);
479 if (!req) 522 if (!req)
480 return -EINTR; 523 return -EINTR;
481 524
482 while (count) { 525 while (count) {
483 size_t tmp;
484 size_t nres; 526 size_t nres;
485 size_t nbytes = min(count, nmax); 527 size_t nbytes = min(count, nmax);
486 int err = fuse_get_user_pages(req, buf, nbytes, !write); 528 int err = fuse_get_user_pages(req, buf, nbytes, !write);
@@ -488,8 +530,8 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
488 res = err; 530 res = err;
489 break; 531 break;
490 } 532 }
491 tmp = (req->num_pages << PAGE_SHIFT) - req->page_offset; 533 nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
492 nbytes = min(nbytes, tmp); 534 nbytes = min(count, nbytes);
493 if (write) 535 if (write)
494 nres = fuse_send_write(req, file, inode, pos, nbytes); 536 nres = fuse_send_write(req, file, inode, pos, nbytes);
495 else 537 else
@@ -535,9 +577,9 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
535 struct inode *inode = file->f_dentry->d_inode; 577 struct inode *inode = file->f_dentry->d_inode;
536 ssize_t res; 578 ssize_t res;
537 /* Don't allow parallel writes to the same file */ 579 /* Don't allow parallel writes to the same file */
538 down(&inode->i_sem); 580 mutex_lock(&inode->i_mutex);
539 res = fuse_direct_io(file, buf, count, ppos, 1); 581 res = fuse_direct_io(file, buf, count, ppos, 1);
540 up(&inode->i_sem); 582 mutex_unlock(&inode->i_mutex);
541 return res; 583 return res;
542} 584}
543 585
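
One subtle fix in file.c is the page arithmetic for direct I/O: fuse_get_user_pages() now clamps the page count to at least one, and fuse_direct_io() re-derives the transfer size from the pages actually mapped before limiting it by the remaining count. A small worked example of that arithmetic in plain C (PAGE_SHIFT and the per-request page limit are illustrative values, not taken from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT      12                      /* illustrative: 4 KiB pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define MAX_PAGES       32                      /* stands in for FUSE_MAX_PAGES_PER_REQ */

    static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
    static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

    int main(void)
    {
            unsigned long user_addr = 0x1234;               /* unaligned user buffer */
            unsigned long count = 3 * PAGE_SIZE + 100;      /* bytes the caller asked for */
            unsigned long offset = user_addr & (PAGE_SIZE - 1);
            unsigned long nbytes, npages, mapped;

            /* fuse_get_user_pages(): how many pages the buffer spans */
            nbytes = min_ul(count, (unsigned long)MAX_PAGES << PAGE_SHIFT);
            npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
            npages = min_ul(max_ul(npages, 1), MAX_PAGES);  /* the new clamp */

            /* fuse_direct_io(): transfer size re-derived from the mapped pages,
             * then limited by what is still left to copy */
            mapped = (npages << PAGE_SHIFT) - offset;
            nbytes = min_ul(count, mapped);

            printf("offset=%lu npages=%lu nbytes=%lu\n", offset, npages, nbytes);
            return 0;
    }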
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0ea5301f86be..46cf933aa3bf 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,9 @@
21/** If more requests are outstanding, then the operation will block */ 21/** If more requests are outstanding, then the operation will block */
22#define FUSE_MAX_OUTSTANDING 10 22#define FUSE_MAX_OUTSTANDING 10
23 23
24/** It could be as large as PATH_MAX, but would that have any uses? */
25#define FUSE_NAME_MAX 1024
26
24/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem 27/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
25 module will check permissions based on the file mode. Otherwise no 28 module will check permissions based on the file mode. Otherwise no
26 permission checking is done in the kernel */ 29 permission checking is done in the kernel */
@@ -91,6 +94,11 @@ struct fuse_out {
91 /** Header returned from userspace */ 94 /** Header returned from userspace */
92 struct fuse_out_header h; 95 struct fuse_out_header h;
93 96
97 /*
98 * The following bitfields are not changed during the request
99 * processing
100 */
101
94 /** Last argument is variable length (can be shorter than 102 /** Last argument is variable length (can be shorter than
95 arg->size) */ 103 arg->size) */
96 unsigned argvar:1; 104 unsigned argvar:1;
@@ -108,15 +116,23 @@ struct fuse_out {
108 struct fuse_arg args[3]; 116 struct fuse_arg args[3];
109}; 117};
110 118
111struct fuse_req; 119/** The request state */
120enum fuse_req_state {
121 FUSE_REQ_INIT = 0,
122 FUSE_REQ_PENDING,
123 FUSE_REQ_READING,
124 FUSE_REQ_SENT,
125 FUSE_REQ_FINISHED
126};
127
112struct fuse_conn; 128struct fuse_conn;
113 129
114/** 130/**
115 * A request to the client 131 * A request to the client
116 */ 132 */
117struct fuse_req { 133struct fuse_req {
118 /** This can be on either unused_list, pending or processing 134 /** This can be on either unused_list, pending processing or
119 lists in fuse_conn */ 135 io lists in fuse_conn */
120 struct list_head list; 136 struct list_head list;
121 137
122 /** Entry on the background list */ 138 /** Entry on the background list */
@@ -125,6 +141,12 @@ struct fuse_req {
125 /** refcount */ 141 /** refcount */
126 atomic_t count; 142 atomic_t count;
127 143
144 /*
145 * The following bitfields are either set once before the
146 * request is queued or setting/clearing them is protected by
147 * fuse_lock
148 */
149
128 /** True if the request has reply */ 150 /** True if the request has reply */
129 unsigned isreply:1; 151 unsigned isreply:1;
130 152
@@ -140,11 +162,8 @@ struct fuse_req {
140 /** Data is being copied to/from the request */ 162 /** Data is being copied to/from the request */
141 unsigned locked:1; 163 unsigned locked:1;
142 164
143 /** Request has been sent to userspace */ 165 /** State of the request */
144 unsigned sent:1; 166 enum fuse_req_state state;
145
146 /** The request is finished */
147 unsigned finished:1;
148 167
149 /** The request input */ 168 /** The request input */
150 struct fuse_in in; 169 struct fuse_in in;
@@ -159,7 +178,9 @@ struct fuse_req {
159 union { 178 union {
160 struct fuse_forget_in forget_in; 179 struct fuse_forget_in forget_in;
161 struct fuse_release_in release_in; 180 struct fuse_release_in release_in;
162 struct fuse_init_in_out init_in_out; 181 struct fuse_init_in init_in;
182 struct fuse_init_out init_out;
183 struct fuse_read_in read_in;
163 } misc; 184 } misc;
164 185
165 /** page vector */ 186 /** page vector */
@@ -179,6 +200,9 @@ struct fuse_req {
179 200
180 /** File used in the request (or NULL) */ 201 /** File used in the request (or NULL) */
181 struct file *file; 202 struct file *file;
203
204 /** Request completion callback */
205 void (*end)(struct fuse_conn *, struct fuse_req *);
182}; 206};
183 207
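/*
 * Illustration only, not from this patch: the new ->end callback lets a
 * request finish asynchronously.  A hedged sketch of the completion
 * path that would invoke it (request_end_sketch() is a made-up name;
 * the real dispatch lives in dev.c, which is not shown in this hunk):
 */
static void request_end_sketch(struct fuse_conn *fc, struct fuse_req *req)
{
	req->state = FUSE_REQ_FINISHED;
	if (req->end)
		req->end(fc, req);	/* e.g. process_init_reply() */
	else
		fuse_put_request(fc, req);
}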
184/** 208/**
@@ -189,9 +213,6 @@ struct fuse_req {
189 * unmounted. 213 * unmounted.
190 */ 214 */
191struct fuse_conn { 215struct fuse_conn {
192 /** Reference count */
193 int count;
194
195 /** The user id for this mount */ 216 /** The user id for this mount */
196 uid_t user_id; 217 uid_t user_id;
197 218
@@ -216,6 +237,9 @@ struct fuse_conn {
216 /** The list of requests being processed */ 237 /** The list of requests being processed */
217 struct list_head processing; 238 struct list_head processing;
218 239
240 /** The list of requests under I/O */
241 struct list_head io;
242
219 /** Requests put in the background (RELEASE or any other 243 /** Requests put in the background (RELEASE or any other
220 interrupted request) */ 244 interrupted request) */
221 struct list_head background; 245 struct list_head background;
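/*
 * Illustration only, not from this patch: a hedged sketch of what the
 * new fc->io list is for.  While a request's data is being copied
 * to/from userspace it is parked on fc->io, so an abort can tell queued
 * requests apart from requests whose buffers are currently in use
 * (the helper name and exact call site are assumptions):
 */
static void request_move_to_io(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fuse_lock);
	list_move(&req->list, &fc->io);	/* off the pending/processing lists */
	req->locked = 1;		/* copy in progress, keep pages around */
	spin_unlock(&fuse_lock);
}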
@@ -237,14 +261,22 @@ struct fuse_conn {
237 u64 reqctr; 261 u64 reqctr;
238 262
239 /** Mount is active */ 263 /** Mount is active */
240 unsigned mounted : 1; 264 unsigned mounted;
241 265
242 /** Connection established */ 266 /** Connection established, cleared on umount, connection
243 unsigned connected : 1; 267 abort and device release */
268 unsigned connected;
244 269
245 /** Connection failed (version mismatch) */ 270 /** Connection failed (version mismatch). Cannot race with
271 setting other bitfields since it is only set once in INIT
272 reply, before any other request, and never cleared */
246 unsigned conn_error : 1; 273 unsigned conn_error : 1;
247 274
275 /*
276 * The following bitfields are only for optimization purposes
277 * and hence races in setting them will not cause malfunction
278 */
279
248 /** Is fsync not implemented by fs? */ 280 /** Is fsync not implemented by fs? */
249 unsigned no_fsync : 1; 281 unsigned no_fsync : 1;
250 282
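/*
 * Illustration only, not from this patch: why "mounted" and "connected"
 * stop being 1-bit bitfields.  Bitfields packed into one word are
 * updated with a read-modify-write of the whole word, so two CPUs
 * flipping different bits without a common lock can lose one update.
 * Giving each flag its own word makes a plain store safe:
 */
struct flags_packed {
	unsigned a : 1;		/* CPU0: f.a = 1;                        */
	unsigned b : 1;		/* CPU1: f.b = 1;  one store may be lost */
};

struct flags_separate {
	unsigned a;		/* independent words, independent stores */
	unsigned b;
};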
@@ -272,18 +304,22 @@ struct fuse_conn {
272 /** Is create not implemented by fs? */ 304 /** Is create not implemented by fs? */
273 unsigned no_create : 1; 305 unsigned no_create : 1;
274 306
307 /** The number of requests waiting for completion */
308 atomic_t num_waiting;
309
310 /** Negotiated minor version */
311 unsigned minor;
312
275 /** Backing dev info */ 313 /** Backing dev info */
276 struct backing_dev_info bdi; 314 struct backing_dev_info bdi;
277};
278 315
279static inline struct fuse_conn **get_fuse_conn_super_p(struct super_block *sb) 316 /** kobject */
280{ 317 struct kobject kobj;
281 return (struct fuse_conn **) &sb->s_fs_info; 318};
282}
283 319
284static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) 320static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
285{ 321{
286 return *get_fuse_conn_super_p(sb); 322 return sb->s_fs_info;
287} 323}
288 324
289static inline struct fuse_conn *get_fuse_conn(struct inode *inode) 325static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
@@ -291,6 +327,11 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
291 return get_fuse_conn_super(inode->i_sb); 327 return get_fuse_conn_super(inode->i_sb);
292} 328}
293 329
330static inline struct fuse_conn *get_fuse_conn_kobj(struct kobject *obj)
331{
332 return container_of(obj, struct fuse_conn, kobj);
333}
334
294static inline struct fuse_inode *get_fuse_inode(struct inode *inode) 335static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
295{ 336{
296 return container_of(inode, struct fuse_inode, inode); 337 return container_of(inode, struct fuse_inode, inode);
@@ -332,11 +373,10 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
332 unsigned long nodeid, u64 nlookup); 373 unsigned long nodeid, u64 nlookup);
333 374
334/** 375/**
335 * Send READ or READDIR request 376 * Initialize READ or READDIR request
336 */ 377 */
337size_t fuse_send_read_common(struct fuse_req *req, struct file *file, 378void fuse_read_fill(struct fuse_req *req, struct file *file,
338 struct inode *inode, loff_t pos, size_t count, 379 struct inode *inode, loff_t pos, size_t count, int opcode);
339 int isdir);
340 380
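/*
 * Illustration only, not from this patch: a hedged sketch of how the
 * new fuse_read_fill() split is meant to be used.  The helper only
 * fills in the READ/READDIR request; each caller decides how to send it
 * (fuse_send_read_sketch() and its return convention are assumptions,
 * not code from this diff):
 */
static size_t fuse_send_read_sketch(struct fuse_req *req, struct file *file,
				    struct inode *inode, loff_t pos,
				    size_t count)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
	request_send(fc, req);		/* synchronous send, wait for reply */
	return req->out.args[0].size;	/* bytes actually returned */
}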
341/** 381/**
342 * Send OPEN or OPENDIR request 382 * Send OPEN or OPENDIR request
@@ -391,12 +431,6 @@ void fuse_init_symlink(struct inode *inode);
391void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr); 431void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr);
392 432
393/** 433/**
394 * Check if the connection can be released, and if yes, then free the
395 * connection structure
396 */
397void fuse_release_conn(struct fuse_conn *fc);
398
399/**
400 * Initialize the client device 434 * Initialize the client device
401 */ 435 */
402int fuse_dev_init(void); 436int fuse_dev_init(void);
@@ -452,6 +486,9 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
452 */ 486 */
453void fuse_release_background(struct fuse_req *req); 487void fuse_release_background(struct fuse_req *req);
454 488
489/* Abort all requests */
490void fuse_abort_conn(struct fuse_conn *fc);
491
455/** 492/**
456 * Get the attributes of a file 493 * Get the attributes of a file
457 */ 494 */
@@ -461,8 +498,3 @@ int fuse_do_getattr(struct inode *inode);
461 * Invalidate inode attributes 498 * Invalidate inode attributes
462 */ 499 */
463void fuse_invalidate_attr(struct inode *inode); 500void fuse_invalidate_attr(struct inode *inode);
464
465/**
466 * Send the INIT message
467 */
468void fuse_send_init(struct fuse_conn *fc);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e69a546844d0..c755a0440a66 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -24,6 +24,13 @@ MODULE_LICENSE("GPL");
24 24
25spinlock_t fuse_lock; 25spinlock_t fuse_lock;
26static kmem_cache_t *fuse_inode_cachep; 26static kmem_cache_t *fuse_inode_cachep;
27static struct subsystem connections_subsys;
28
29struct fuse_conn_attr {
30 struct attribute attr;
31 ssize_t (*show)(struct fuse_conn *, char *);
32 ssize_t (*store)(struct fuse_conn *, const char *, size_t);
33};
27 34
28#define FUSE_SUPER_MAGIC 0x65735546 35#define FUSE_SUPER_MAGIC 0x65735546
29 36
@@ -135,12 +142,8 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
135 fuse_init_common(inode); 142 fuse_init_common(inode);
136 init_special_inode(inode, inode->i_mode, 143 init_special_inode(inode, inode->i_mode,
137 new_decode_dev(attr->rdev)); 144 new_decode_dev(attr->rdev));
138 } else { 145 } else
139 /* Don't let user create weird files */ 146 BUG();
140 inode->i_mode = S_IFREG;
141 fuse_init_common(inode);
142 fuse_init_file_inode(inode);
143 }
144} 147}
145 148
146static int fuse_inode_eq(struct inode *inode, void *_nodeidp) 149static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
@@ -193,6 +196,11 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
193 return inode; 196 return inode;
194} 197}
195 198
199static void fuse_umount_begin(struct super_block *sb)
200{
201 fuse_abort_conn(get_fuse_conn_super(sb));
202}
203
196static void fuse_put_super(struct super_block *sb) 204static void fuse_put_super(struct super_block *sb)
197{ 205{
198 struct fuse_conn *fc = get_fuse_conn_super(sb); 206 struct fuse_conn *fc = get_fuse_conn_super(sb);
@@ -204,20 +212,20 @@ static void fuse_put_super(struct super_block *sb)
204 212
205 spin_lock(&fuse_lock); 213 spin_lock(&fuse_lock);
206 fc->mounted = 0; 214 fc->mounted = 0;
207 fc->user_id = 0; 215 fc->connected = 0;
208 fc->group_id = 0; 216 spin_unlock(&fuse_lock);
209 fc->flags = 0; 217 up_write(&fc->sbput_sem);
210 /* Flush all readers on this fs */ 218 /* Flush all readers on this fs */
211 wake_up_all(&fc->waitq); 219 wake_up_all(&fc->waitq);
212 up_write(&fc->sbput_sem); 220 kobject_del(&fc->kobj);
213 fuse_release_conn(fc); 221 kobject_put(&fc->kobj);
214 spin_unlock(&fuse_lock);
215} 222}
216 223
217static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) 224static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
218{ 225{
219 stbuf->f_type = FUSE_SUPER_MAGIC; 226 stbuf->f_type = FUSE_SUPER_MAGIC;
220 stbuf->f_bsize = attr->bsize; 227 stbuf->f_bsize = attr->bsize;
228 stbuf->f_frsize = attr->frsize;
221 stbuf->f_blocks = attr->blocks; 229 stbuf->f_blocks = attr->blocks;
222 stbuf->f_bfree = attr->bfree; 230 stbuf->f_bfree = attr->bfree;
223 stbuf->f_bavail = attr->bavail; 231 stbuf->f_bavail = attr->bavail;
@@ -238,10 +246,12 @@ static int fuse_statfs(struct super_block *sb, struct kstatfs *buf)
238 if (!req) 246 if (!req)
239 return -EINTR; 247 return -EINTR;
240 248
249 memset(&outarg, 0, sizeof(outarg));
241 req->in.numargs = 0; 250 req->in.numargs = 0;
242 req->in.h.opcode = FUSE_STATFS; 251 req->in.h.opcode = FUSE_STATFS;
243 req->out.numargs = 1; 252 req->out.numargs = 1;
244 req->out.args[0].size = sizeof(outarg); 253 req->out.args[0].size =
254 fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
245 req->out.args[0].value = &outarg; 255 req->out.args[0].value = &outarg;
246 request_send(fc, req); 256 request_send(fc, req);
247 err = req->out.h.error; 257 err = req->out.h.error;
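/*
 * Illustration only, not from this patch: the negotiated minor version
 * picks the expected reply size, so a pre-7.4 userspace that sends the
 * shorter statfs reply still works (the helper name is made up, and
 * struct fuse_statfs_out is assumed to be the reply type used above):
 */
static size_t statfs_out_size(struct fuse_conn *fc)
{
	return fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE
			     : sizeof(struct fuse_statfs_out);
}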
@@ -357,8 +367,10 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
357 return 0; 367 return 0;
358} 368}
359 369
360static void free_conn(struct fuse_conn *fc) 370static void fuse_conn_release(struct kobject *kobj)
361{ 371{
372 struct fuse_conn *fc = get_fuse_conn_kobj(kobj);
373
362 while (!list_empty(&fc->unused_list)) { 374 while (!list_empty(&fc->unused_list)) {
363 struct fuse_req *req; 375 struct fuse_req *req;
364 req = list_entry(fc->unused_list.next, struct fuse_req, list); 376 req = list_entry(fc->unused_list.next, struct fuse_req, list);
@@ -368,33 +380,28 @@ static void free_conn(struct fuse_conn *fc)
368 kfree(fc); 380 kfree(fc);
369} 381}
370 382
371/* Must be called with the fuse lock held */
372void fuse_release_conn(struct fuse_conn *fc)
373{
374 fc->count--;
375 if (!fc->count)
376 free_conn(fc);
377}
378
379static struct fuse_conn *new_conn(void) 383static struct fuse_conn *new_conn(void)
380{ 384{
381 struct fuse_conn *fc; 385 struct fuse_conn *fc;
382 386
383 fc = kmalloc(sizeof(*fc), GFP_KERNEL); 387 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
384 if (fc != NULL) { 388 if (fc) {
385 int i; 389 int i;
386 memset(fc, 0, sizeof(*fc));
387 init_waitqueue_head(&fc->waitq); 390 init_waitqueue_head(&fc->waitq);
388 INIT_LIST_HEAD(&fc->pending); 391 INIT_LIST_HEAD(&fc->pending);
389 INIT_LIST_HEAD(&fc->processing); 392 INIT_LIST_HEAD(&fc->processing);
393 INIT_LIST_HEAD(&fc->io);
390 INIT_LIST_HEAD(&fc->unused_list); 394 INIT_LIST_HEAD(&fc->unused_list);
391 INIT_LIST_HEAD(&fc->background); 395 INIT_LIST_HEAD(&fc->background);
392 sema_init(&fc->outstanding_sem, 0); 396 sema_init(&fc->outstanding_sem, 1); /* One for INIT */
393 init_rwsem(&fc->sbput_sem); 397 init_rwsem(&fc->sbput_sem);
398 kobj_set_kset_s(fc, connections_subsys);
399 kobject_init(&fc->kobj);
400 atomic_set(&fc->num_waiting, 0);
394 for (i = 0; i < FUSE_MAX_OUTSTANDING; i++) { 401 for (i = 0; i < FUSE_MAX_OUTSTANDING; i++) {
395 struct fuse_req *req = fuse_request_alloc(); 402 struct fuse_req *req = fuse_request_alloc();
396 if (!req) { 403 if (!req) {
397 free_conn(fc); 404 kobject_put(&fc->kobj);
398 return NULL; 405 return NULL;
399 } 406 }
400 list_add(&req->list, &fc->unused_list); 407 list_add(&req->list, &fc->unused_list);
@@ -409,25 +416,32 @@ static struct fuse_conn *new_conn(void)
409static struct fuse_conn *get_conn(struct file *file, struct super_block *sb) 416static struct fuse_conn *get_conn(struct file *file, struct super_block *sb)
410{ 417{
411 struct fuse_conn *fc; 418 struct fuse_conn *fc;
419 int err;
412 420
421 err = -EINVAL;
413 if (file->f_op != &fuse_dev_operations) 422 if (file->f_op != &fuse_dev_operations)
414 return ERR_PTR(-EINVAL); 423 goto out_err;
424
425 err = -ENOMEM;
415 fc = new_conn(); 426 fc = new_conn();
416 if (fc == NULL) 427 if (!fc)
417 return ERR_PTR(-ENOMEM); 428 goto out_err;
429
418 spin_lock(&fuse_lock); 430 spin_lock(&fuse_lock);
419 if (file->private_data) { 431 err = -EINVAL;
420 free_conn(fc); 432 if (file->private_data)
421 fc = ERR_PTR(-EINVAL); 433 goto out_unlock;
422 } else { 434
423 file->private_data = fc; 435 kobject_get(&fc->kobj);
424 *get_fuse_conn_super_p(sb) = fc; 436 file->private_data = fc;
425 fc->mounted = 1;
426 fc->connected = 1;
427 fc->count = 2;
428 }
429 spin_unlock(&fuse_lock); 437 spin_unlock(&fuse_lock);
430 return fc; 438 return fc;
439
440 out_unlock:
441 spin_unlock(&fuse_lock);
442 kobject_put(&fc->kobj);
443 out_err:
444 return ERR_PTR(err);
431} 445}
432 446
433static struct inode *get_root_inode(struct super_block *sb, unsigned mode) 447static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
@@ -446,16 +460,74 @@ static struct super_operations fuse_super_operations = {
446 .read_inode = fuse_read_inode, 460 .read_inode = fuse_read_inode,
447 .clear_inode = fuse_clear_inode, 461 .clear_inode = fuse_clear_inode,
448 .put_super = fuse_put_super, 462 .put_super = fuse_put_super,
463 .umount_begin = fuse_umount_begin,
449 .statfs = fuse_statfs, 464 .statfs = fuse_statfs,
450 .show_options = fuse_show_options, 465 .show_options = fuse_show_options,
451}; 466};
452 467
468static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
469{
470 int i;
471 struct fuse_init_out *arg = &req->misc.init_out;
472
473 if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
474 fc->conn_error = 1;
475 else {
476 fc->minor = arg->minor;
477 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
478 }
479
480 /* After the INIT reply is received other requests can go
481 out.  So do (FUSE_MAX_OUTSTANDING - 1) up()s on
482 outstanding_sem.  The last up() is done in
483 fuse_putback_request() */
484 for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
485 up(&fc->outstanding_sem);
486
487 fuse_put_request(fc, req);
488}
489
490static void fuse_send_init(struct fuse_conn *fc)
491{
492 /* This is called from fuse_fill_super() so there's guaranteed
493 to be exactly one request available */
494 struct fuse_req *req = fuse_get_request(fc);
495 struct fuse_init_in *arg = &req->misc.init_in;
496
497 arg->major = FUSE_KERNEL_VERSION;
498 arg->minor = FUSE_KERNEL_MINOR_VERSION;
499 req->in.h.opcode = FUSE_INIT;
500 req->in.numargs = 1;
501 req->in.args[0].size = sizeof(*arg);
502 req->in.args[0].value = arg;
503 req->out.numargs = 1;
504 /* Variable length argument used for backward compatibility
505 with interface version < 7.5. Rest of init_out is zeroed
506 by do_get_request(), so a short reply is not a problem */
507 req->out.argvar = 1;
508 req->out.args[0].size = sizeof(struct fuse_init_out);
509 req->out.args[0].value = &req->misc.init_out;
510 req->end = process_init_reply;
511 request_send_background(fc, req);
512}
513
514static unsigned long long conn_id(void)
515{
516 static unsigned long long ctr = 1;
517 unsigned long long val;
518 spin_lock(&fuse_lock);
519 val = ctr++;
520 spin_unlock(&fuse_lock);
521 return val;
522}
523
453static int fuse_fill_super(struct super_block *sb, void *data, int silent) 524static int fuse_fill_super(struct super_block *sb, void *data, int silent)
454{ 525{
455 struct fuse_conn *fc; 526 struct fuse_conn *fc;
456 struct inode *root; 527 struct inode *root;
457 struct fuse_mount_data d; 528 struct fuse_mount_data d;
458 struct file *file; 529 struct file *file;
530 struct dentry *root_dentry;
459 int err; 531 int err;
460 532
461 if (!parse_fuse_opt((char *) data, &d)) 533 if (!parse_fuse_opt((char *) data, &d))
@@ -482,25 +554,43 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
482 fc->max_read = d.max_read; 554 fc->max_read = d.max_read;
483 if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages) 555 if (fc->max_read / PAGE_CACHE_SIZE < fc->bdi.ra_pages)
484 fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE; 556 fc->bdi.ra_pages = fc->max_read / PAGE_CACHE_SIZE;
485 fc->max_write = FUSE_MAX_IN / 2; 557
558 /* Used by get_root_inode() */
559 sb->s_fs_info = fc;
486 560
487 err = -ENOMEM; 561 err = -ENOMEM;
488 root = get_root_inode(sb, d.rootmode); 562 root = get_root_inode(sb, d.rootmode);
489 if (root == NULL) 563 if (!root)
490 goto err; 564 goto err;
491 565
492 sb->s_root = d_alloc_root(root); 566 root_dentry = d_alloc_root(root);
493 if (!sb->s_root) { 567 if (!root_dentry) {
494 iput(root); 568 iput(root);
495 goto err; 569 goto err;
496 } 570 }
571
572 err = kobject_set_name(&fc->kobj, "%llu", conn_id());
573 if (err)
574 goto err_put_root;
575
576 err = kobject_add(&fc->kobj);
577 if (err)
578 goto err_put_root;
579
580 sb->s_root = root_dentry;
581 spin_lock(&fuse_lock);
582 fc->mounted = 1;
583 fc->connected = 1;
584 spin_unlock(&fuse_lock);
585
497 fuse_send_init(fc); 586 fuse_send_init(fc);
587
498 return 0; 588 return 0;
499 589
590 err_put_root:
591 dput(root_dentry);
500 err: 592 err:
501 spin_lock(&fuse_lock); 593 kobject_put(&fc->kobj);
502 fuse_release_conn(fc);
503 spin_unlock(&fuse_lock);
504 return err; 594 return err;
505} 595}
506 596
@@ -518,6 +608,69 @@ static struct file_system_type fuse_fs_type = {
518 .kill_sb = kill_anon_super, 608 .kill_sb = kill_anon_super,
519}; 609};
520 610
611static ssize_t fuse_conn_waiting_show(struct fuse_conn *fc, char *page)
612{
613 return sprintf(page, "%i\n", atomic_read(&fc->num_waiting));
614}
615
616static ssize_t fuse_conn_abort_store(struct fuse_conn *fc, const char *page,
617 size_t count)
618{
619 fuse_abort_conn(fc);
620 return count;
621}
622
623static struct fuse_conn_attr fuse_conn_waiting =
624 __ATTR(waiting, 0400, fuse_conn_waiting_show, NULL);
625static struct fuse_conn_attr fuse_conn_abort =
626 __ATTR(abort, 0600, NULL, fuse_conn_abort_store);
627
628static struct attribute *fuse_conn_attrs[] = {
629 &fuse_conn_waiting.attr,
630 &fuse_conn_abort.attr,
631 NULL,
632};
633
634static ssize_t fuse_conn_attr_show(struct kobject *kobj,
635 struct attribute *attr,
636 char *page)
637{
638 struct fuse_conn_attr *fca =
639 container_of(attr, struct fuse_conn_attr, attr);
640
641 if (fca->show)
642 return fca->show(get_fuse_conn_kobj(kobj), page);
643 else
644 return -EACCES;
645}
646
647static ssize_t fuse_conn_attr_store(struct kobject *kobj,
648 struct attribute *attr,
649 const char *page, size_t count)
650{
651 struct fuse_conn_attr *fca =
652 container_of(attr, struct fuse_conn_attr, attr);
653
654 if (fca->store)
655 return fca->store(get_fuse_conn_kobj(kobj), page, count);
656 else
657 return -EACCES;
658}
659
660static struct sysfs_ops fuse_conn_sysfs_ops = {
661 .show = &fuse_conn_attr_show,
662 .store = &fuse_conn_attr_store,
663};
664
665static struct kobj_type ktype_fuse_conn = {
666 .release = fuse_conn_release,
667 .sysfs_ops = &fuse_conn_sysfs_ops,
668 .default_attrs = fuse_conn_attrs,
669};
670
671static decl_subsys(fuse, NULL, NULL);
672static decl_subsys(connections, &ktype_fuse_conn, NULL);
673
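/*
 * Illustration only, not from this patch: what the sysfs pieces above
 * add up to from userspace.  Each connection gets a directory
 * /sys/fs/fuse/connections/<id>/ with a read-only "waiting" count and a
 * write-only "abort" file; writing anything to "abort" calls
 * fuse_abort_conn().  A small userspace sketch, with a made-up id:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int fuse_sysfs_abort(const char *conn_dir)
{
	char path[256];
	int fd;
	ssize_t n;

	/* e.g. conn_dir = "/sys/fs/fuse/connections/1" */
	snprintf(path, sizeof(path), "%s/abort", conn_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);
	close(fd);
	return n == 1 ? 0 : -1;
}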
521static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, 674static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
522 unsigned long flags) 675 unsigned long flags)
523{ 676{
@@ -555,6 +708,34 @@ static void fuse_fs_cleanup(void)
555 kmem_cache_destroy(fuse_inode_cachep); 708 kmem_cache_destroy(fuse_inode_cachep);
556} 709}
557 710
711static int fuse_sysfs_init(void)
712{
713 int err;
714
715 kset_set_kset_s(&fuse_subsys, fs_subsys);
716 err = subsystem_register(&fuse_subsys);
717 if (err)
718 goto out_err;
719
720 kset_set_kset_s(&connections_subsys, fuse_subsys);
721 err = subsystem_register(&connections_subsys);
722 if (err)
723 goto out_fuse_unregister;
724
725 return 0;
726
727 out_fuse_unregister:
728 subsystem_unregister(&fuse_subsys);
729 out_err:
730 return err;
731}
732
733static void fuse_sysfs_cleanup(void)
734{
735 subsystem_unregister(&connections_subsys);
736 subsystem_unregister(&fuse_subsys);
737}
738
558static int __init fuse_init(void) 739static int __init fuse_init(void)
559{ 740{
560 int res; 741 int res;
@@ -571,8 +752,14 @@ static int __init fuse_init(void)
571 if (res) 752 if (res)
572 goto err_fs_cleanup; 753 goto err_fs_cleanup;
573 754
755 res = fuse_sysfs_init();
756 if (res)
757 goto err_dev_cleanup;
758
574 return 0; 759 return 0;
575 760
761 err_dev_cleanup:
762 fuse_dev_cleanup();
576 err_fs_cleanup: 763 err_fs_cleanup:
577 fuse_fs_cleanup(); 764 fuse_fs_cleanup();
578 err: 765 err:
@@ -583,6 +770,7 @@ static void __exit fuse_exit(void)
583{ 770{
584 printk(KERN_DEBUG "fuse exit\n"); 771 printk(KERN_DEBUG "fuse exit\n");
585 772
773 fuse_sysfs_cleanup();
586 fuse_fs_cleanup(); 774 fuse_fs_cleanup();
587 fuse_dev_cleanup(); 775 fuse_dev_cleanup();
588} 776}