aboutsummaryrefslogtreecommitdiffstats
path: root/fs/fuse
diff options
context:
space:
mode:
Diffstat (limited to 'fs/fuse')
-rw-r--r--fs/fuse/Makefile2
-rw-r--r--fs/fuse/control.c218
-rw-r--r--fs/fuse/dev.c418
-rw-r--r--fs/fuse/dir.c56
-rw-r--r--fs/fuse/file.c206
-rw-r--r--fs/fuse/fuse_i.h135
-rw-r--r--fs/fuse/inode.c178
7 files changed, 842 insertions, 371 deletions
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index c3e1f760cac9..72437065f6ad 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_FUSE_FS) += fuse.o 5obj-$(CONFIG_FUSE_FS) += fuse.o
6 6
7fuse-objs := dev.o dir.o file.o inode.o 7fuse-objs := dev.o dir.o file.o inode.o control.o
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
new file mode 100644
index 000000000000..a3bce3a77253
--- /dev/null
+++ b/fs/fuse/control.c
@@ -0,0 +1,218 @@
1/*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13
14#define FUSE_CTL_SUPER_MAGIC 0x65735543
15
16/*
17 * This is non-NULL when the single instance of the control filesystem
18 * exists. Protected by fuse_mutex
19 */
20static struct super_block *fuse_control_sb;
21
22static struct fuse_conn *fuse_ctl_file_conn_get(struct file *file)
23{
24 struct fuse_conn *fc;
25 mutex_lock(&fuse_mutex);
26 fc = file->f_dentry->d_inode->u.generic_ip;
27 if (fc)
28 fc = fuse_conn_get(fc);
29 mutex_unlock(&fuse_mutex);
30 return fc;
31}
32
33static ssize_t fuse_conn_abort_write(struct file *file, const char __user *buf,
34 size_t count, loff_t *ppos)
35{
36 struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
37 if (fc) {
38 fuse_abort_conn(fc);
39 fuse_conn_put(fc);
40 }
41 return count;
42}
43
44static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
45 size_t len, loff_t *ppos)
46{
47 char tmp[32];
48 size_t size;
49
50 if (!*ppos) {
51 struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
52 if (!fc)
53 return 0;
54
55 file->private_data=(void *)(long)atomic_read(&fc->num_waiting);
56 fuse_conn_put(fc);
57 }
58 size = sprintf(tmp, "%ld\n", (long)file->private_data);
59 return simple_read_from_buffer(buf, len, ppos, tmp, size);
60}
61
62static const struct file_operations fuse_ctl_abort_ops = {
63 .open = nonseekable_open,
64 .write = fuse_conn_abort_write,
65};
66
67static const struct file_operations fuse_ctl_waiting_ops = {
68 .open = nonseekable_open,
69 .read = fuse_conn_waiting_read,
70};
71
72static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
73 struct fuse_conn *fc,
74 const char *name,
75 int mode, int nlink,
76 struct inode_operations *iop,
77 const struct file_operations *fop)
78{
79 struct dentry *dentry;
80 struct inode *inode;
81
82 BUG_ON(fc->ctl_ndents >= FUSE_CTL_NUM_DENTRIES);
83 dentry = d_alloc_name(parent, name);
84 if (!dentry)
85 return NULL;
86
87 fc->ctl_dentry[fc->ctl_ndents++] = dentry;
88 inode = new_inode(fuse_control_sb);
89 if (!inode)
90 return NULL;
91
92 inode->i_mode = mode;
93 inode->i_uid = fc->user_id;
94 inode->i_gid = fc->group_id;
95 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
96 /* setting ->i_op to NULL is not allowed */
97 if (iop)
98 inode->i_op = iop;
99 inode->i_fop = fop;
100 inode->i_nlink = nlink;
101 inode->u.generic_ip = fc;
102 d_add(dentry, inode);
103 return dentry;
104}
105
106/*
107 * Add a connection to the control filesystem (if it exists). Caller
108 * must host fuse_mutex
109 */
110int fuse_ctl_add_conn(struct fuse_conn *fc)
111{
112 struct dentry *parent;
113 char name[32];
114
115 if (!fuse_control_sb)
116 return 0;
117
118 parent = fuse_control_sb->s_root;
119 parent->d_inode->i_nlink++;
120 sprintf(name, "%llu", (unsigned long long) fc->id);
121 parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
122 &simple_dir_inode_operations,
123 &simple_dir_operations);
124 if (!parent)
125 goto err;
126
127 if (!fuse_ctl_add_dentry(parent, fc, "waiting", S_IFREG | 0400, 1,
128 NULL, &fuse_ctl_waiting_ops) ||
129 !fuse_ctl_add_dentry(parent, fc, "abort", S_IFREG | 0200, 1,
130 NULL, &fuse_ctl_abort_ops))
131 goto err;
132
133 return 0;
134
135 err:
136 fuse_ctl_remove_conn(fc);
137 return -ENOMEM;
138}
139
140/*
141 * Remove a connection from the control filesystem (if it exists).
142 * Caller must host fuse_mutex
143 */
144void fuse_ctl_remove_conn(struct fuse_conn *fc)
145{
146 int i;
147
148 if (!fuse_control_sb)
149 return;
150
151 for (i = fc->ctl_ndents - 1; i >= 0; i--) {
152 struct dentry *dentry = fc->ctl_dentry[i];
153 dentry->d_inode->u.generic_ip = NULL;
154 d_drop(dentry);
155 dput(dentry);
156 }
157 fuse_control_sb->s_root->d_inode->i_nlink--;
158}
159
160static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
161{
162 struct tree_descr empty_descr = {""};
163 struct fuse_conn *fc;
164 int err;
165
166 err = simple_fill_super(sb, FUSE_CTL_SUPER_MAGIC, &empty_descr);
167 if (err)
168 return err;
169
170 mutex_lock(&fuse_mutex);
171 BUG_ON(fuse_control_sb);
172 fuse_control_sb = sb;
173 list_for_each_entry(fc, &fuse_conn_list, entry) {
174 err = fuse_ctl_add_conn(fc);
175 if (err) {
176 fuse_control_sb = NULL;
177 mutex_unlock(&fuse_mutex);
178 return err;
179 }
180 }
181 mutex_unlock(&fuse_mutex);
182
183 return 0;
184}
185
186static int fuse_ctl_get_sb(struct file_system_type *fs_type, int flags,
187 const char *dev_name, void *raw_data,
188 struct vfsmount *mnt)
189{
190 return get_sb_single(fs_type, flags, raw_data,
191 fuse_ctl_fill_super, mnt);
192}
193
194static void fuse_ctl_kill_sb(struct super_block *sb)
195{
196 mutex_lock(&fuse_mutex);
197 fuse_control_sb = NULL;
198 mutex_unlock(&fuse_mutex);
199
200 kill_litter_super(sb);
201}
202
203static struct file_system_type fuse_ctl_fs_type = {
204 .owner = THIS_MODULE,
205 .name = "fusectl",
206 .get_sb = fuse_ctl_get_sb,
207 .kill_sb = fuse_ctl_kill_sb,
208};
209
210int __init fuse_ctl_init(void)
211{
212 return register_filesystem(&fuse_ctl_fs_type);
213}
214
215void fuse_ctl_cleanup(void)
216{
217 unregister_filesystem(&fuse_ctl_fs_type);
218}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 104a62dadb94..1e2006caf158 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -34,6 +34,7 @@ static void fuse_request_init(struct fuse_req *req)
34{ 34{
35 memset(req, 0, sizeof(*req)); 35 memset(req, 0, sizeof(*req));
36 INIT_LIST_HEAD(&req->list); 36 INIT_LIST_HEAD(&req->list);
37 INIT_LIST_HEAD(&req->intr_entry);
37 init_waitqueue_head(&req->waitq); 38 init_waitqueue_head(&req->waitq);
38 atomic_set(&req->count, 1); 39 atomic_set(&req->count, 1);
39} 40}
@@ -64,18 +65,6 @@ static void restore_sigs(sigset_t *oldset)
64 sigprocmask(SIG_SETMASK, oldset, NULL); 65 sigprocmask(SIG_SETMASK, oldset, NULL);
65} 66}
66 67
67/*
68 * Reset request, so that it can be reused
69 *
70 * The caller must be _very_ careful to make sure, that it is holding
71 * the only reference to req
72 */
73void fuse_reset_request(struct fuse_req *req)
74{
75 BUG_ON(atomic_read(&req->count) != 1);
76 fuse_request_init(req);
77}
78
79static void __fuse_get_request(struct fuse_req *req) 68static void __fuse_get_request(struct fuse_req *req)
80{ 69{
81 atomic_inc(&req->count); 70 atomic_inc(&req->count);
@@ -88,6 +77,13 @@ static void __fuse_put_request(struct fuse_req *req)
88 atomic_dec(&req->count); 77 atomic_dec(&req->count);
89} 78}
90 79
80static void fuse_req_init_context(struct fuse_req *req)
81{
82 req->in.h.uid = current->fsuid;
83 req->in.h.gid = current->fsgid;
84 req->in.h.pid = current->pid;
85}
86
91struct fuse_req *fuse_get_req(struct fuse_conn *fc) 87struct fuse_req *fuse_get_req(struct fuse_conn *fc)
92{ 88{
93 struct fuse_req *req; 89 struct fuse_req *req;
@@ -103,14 +99,16 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
103 if (intr) 99 if (intr)
104 goto out; 100 goto out;
105 101
102 err = -ENOTCONN;
103 if (!fc->connected)
104 goto out;
105
106 req = fuse_request_alloc(); 106 req = fuse_request_alloc();
107 err = -ENOMEM; 107 err = -ENOMEM;
108 if (!req) 108 if (!req)
109 goto out; 109 goto out;
110 110
111 req->in.h.uid = current->fsuid; 111 fuse_req_init_context(req);
112 req->in.h.gid = current->fsgid;
113 req->in.h.pid = current->pid;
114 req->waiting = 1; 112 req->waiting = 1;
115 return req; 113 return req;
116 114
@@ -119,142 +117,183 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
119 return ERR_PTR(err); 117 return ERR_PTR(err);
120} 118}
121 119
122void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) 120/*
121 * Return request in fuse_file->reserved_req. However that may
122 * currently be in use. If that is the case, wait for it to become
123 * available.
124 */
125static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
126 struct file *file)
123{ 127{
124 if (atomic_dec_and_test(&req->count)) { 128 struct fuse_req *req = NULL;
125 if (req->waiting) 129 struct fuse_file *ff = file->private_data;
126 atomic_dec(&fc->num_waiting); 130
127 fuse_request_free(req); 131 do {
128 } 132 wait_event(fc->blocked_waitq, ff->reserved_req);
133 spin_lock(&fc->lock);
134 if (ff->reserved_req) {
135 req = ff->reserved_req;
136 ff->reserved_req = NULL;
137 get_file(file);
138 req->stolen_file = file;
139 }
140 spin_unlock(&fc->lock);
141 } while (!req);
142
143 return req;
129} 144}
130 145
131/* 146/*
132 * Called with sbput_sem held for read (request_end) or write 147 * Put stolen request back into fuse_file->reserved_req
133 * (fuse_put_super). By the time fuse_put_super() is finished, all
134 * inodes belonging to background requests must be released, so the
135 * iputs have to be done within the locked region.
136 */ 148 */
137void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req) 149static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
138{ 150{
139 iput(req->inode); 151 struct file *file = req->stolen_file;
140 iput(req->inode2); 152 struct fuse_file *ff = file->private_data;
153
141 spin_lock(&fc->lock); 154 spin_lock(&fc->lock);
142 list_del(&req->bg_entry); 155 fuse_request_init(req);
143 if (fc->num_background == FUSE_MAX_BACKGROUND) { 156 BUG_ON(ff->reserved_req);
144 fc->blocked = 0; 157 ff->reserved_req = req;
145 wake_up_all(&fc->blocked_waitq); 158 wake_up(&fc->blocked_waitq);
146 }
147 fc->num_background--;
148 spin_unlock(&fc->lock); 159 spin_unlock(&fc->lock);
160 fput(file);
149} 161}
150 162
151/* 163/*
152 * This function is called when a request is finished. Either a reply 164 * Gets a requests for a file operation, always succeeds
153 * has arrived or it was interrupted (and not yet sent) or some error
154 * occurred during communication with userspace, or the device file
155 * was closed. In case of a background request the reference to the
156 * stored objects are released. The requester thread is woken up (if
157 * still waiting), the 'end' callback is called if given, else the
158 * reference to the request is released
159 * 165 *
160 * Releasing extra reference for foreground requests must be done 166 * This is used for sending the FLUSH request, which must get to
161 * within the same locked region as setting state to finished. This 167 * userspace, due to POSIX locks which may need to be unlocked.
162 * is because fuse_reset_request() may be called after request is
163 * finished and it must be the sole possessor. If request is
164 * interrupted and put in the background, it will return with an error
165 * and hence never be reset and reused.
166 * 168 *
167 * Called with fc->lock, unlocks it 169 * If allocation fails due to OOM, use the reserved request in
170 * fuse_file.
171 *
172 * This is very unlikely to deadlock accidentally, since the
173 * filesystem should not have it's own file open. If deadlock is
174 * intentional, it can still be broken by "aborting" the filesystem.
168 */ 175 */
169static void request_end(struct fuse_conn *fc, struct fuse_req *req) 176struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
170{ 177{
171 list_del(&req->list); 178 struct fuse_req *req;
172 req->state = FUSE_REQ_FINISHED;
173 if (!req->background) {
174 spin_unlock(&fc->lock);
175 wake_up(&req->waitq);
176 fuse_put_request(fc, req);
177 } else {
178 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
179 req->end = NULL;
180 spin_unlock(&fc->lock);
181 down_read(&fc->sbput_sem);
182 if (fc->mounted)
183 fuse_release_background(fc, req);
184 up_read(&fc->sbput_sem);
185 179
186 /* fput must go outside sbput_sem, otherwise it can deadlock */ 180 atomic_inc(&fc->num_waiting);
187 if (req->file) 181 wait_event(fc->blocked_waitq, !fc->blocked);
188 fput(req->file); 182 req = fuse_request_alloc();
183 if (!req)
184 req = get_reserved_req(fc, file);
189 185
190 if (end) 186 fuse_req_init_context(req);
191 end(fc, req); 187 req->waiting = 1;
188 return req;
189}
190
191void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
192{
193 if (atomic_dec_and_test(&req->count)) {
194 if (req->waiting)
195 atomic_dec(&fc->num_waiting);
196
197 if (req->stolen_file)
198 put_reserved_req(fc, req);
192 else 199 else
193 fuse_put_request(fc, req); 200 fuse_request_free(req);
194 } 201 }
195} 202}
196 203
197/* 204/*
198 * Unfortunately request interruption not just solves the deadlock 205 * This function is called when a request is finished. Either a reply
199 * problem, it causes problems too. These stem from the fact, that an 206 * has arrived or it was aborted (and not yet sent) or some error
200 * interrupted request is continued to be processed in userspace, 207 * occurred during communication with userspace, or the device file
201 * while all the locks and object references (inode and file) held 208 * was closed. The requester thread is woken up (if still waiting),
202 * during the operation are released. 209 * the 'end' callback is called if given, else the reference to the
203 * 210 * request is released
204 * To release the locks is exactly why there's a need to interrupt the
205 * request, so there's not a lot that can be done about this, except
206 * introduce additional locking in userspace.
207 *
208 * More important is to keep inode and file references until userspace
209 * has replied, otherwise FORGET and RELEASE could be sent while the
210 * inode/file is still used by the filesystem.
211 *
212 * For this reason the concept of "background" request is introduced.
213 * An interrupted request is backgrounded if it has been already sent
214 * to userspace. Backgrounding involves getting an extra reference to
215 * inode(s) or file used in the request, and adding the request to
216 * fc->background list. When a reply is received for a background
217 * request, the object references are released, and the request is
218 * removed from the list. If the filesystem is unmounted while there
219 * are still background requests, the list is walked and references
220 * are released as if a reply was received.
221 * 211 *
222 * There's one more use for a background request. The RELEASE message is 212 * Called with fc->lock, unlocks it
223 * always sent as background, since it doesn't return an error or
224 * data.
225 */ 213 */
226static void background_request(struct fuse_conn *fc, struct fuse_req *req) 214static void request_end(struct fuse_conn *fc, struct fuse_req *req)
227{ 215{
228 req->background = 1; 216 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
229 list_add(&req->bg_entry, &fc->background); 217 req->end = NULL;
230 fc->num_background++; 218 list_del(&req->list);
231 if (fc->num_background == FUSE_MAX_BACKGROUND) 219 list_del(&req->intr_entry);
232 fc->blocked = 1; 220 req->state = FUSE_REQ_FINISHED;
233 if (req->inode) 221 if (req->background) {
234 req->inode = igrab(req->inode); 222 if (fc->num_background == FUSE_MAX_BACKGROUND) {
235 if (req->inode2) 223 fc->blocked = 0;
236 req->inode2 = igrab(req->inode2); 224 wake_up_all(&fc->blocked_waitq);
225 }
226 fc->num_background--;
227 }
228 spin_unlock(&fc->lock);
229 dput(req->dentry);
230 mntput(req->vfsmount);
237 if (req->file) 231 if (req->file)
238 get_file(req->file); 232 fput(req->file);
233 wake_up(&req->waitq);
234 if (end)
235 end(fc, req);
236 else
237 fuse_put_request(fc, req);
239} 238}
240 239
241/* Called with fc->lock held. Releases, and then reacquires it. */ 240static void wait_answer_interruptible(struct fuse_conn *fc,
242static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) 241 struct fuse_req *req)
243{ 242{
244 sigset_t oldset; 243 if (signal_pending(current))
244 return;
245 245
246 spin_unlock(&fc->lock); 246 spin_unlock(&fc->lock);
247 block_sigs(&oldset);
248 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); 247 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
249 restore_sigs(&oldset);
250 spin_lock(&fc->lock); 248 spin_lock(&fc->lock);
251 if (req->state == FUSE_REQ_FINISHED && !req->interrupted) 249}
252 return; 250
251static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
252{
253 list_add_tail(&req->intr_entry, &fc->interrupts);
254 wake_up(&fc->waitq);
255 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
256}
257
258/* Called with fc->lock held. Releases, and then reacquires it. */
259static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
260{
261 if (!fc->no_interrupt) {
262 /* Any signal may interrupt this */
263 wait_answer_interruptible(fc, req);
264
265 if (req->aborted)
266 goto aborted;
267 if (req->state == FUSE_REQ_FINISHED)
268 return;
253 269
254 if (!req->interrupted) {
255 req->out.h.error = -EINTR;
256 req->interrupted = 1; 270 req->interrupted = 1;
271 if (req->state == FUSE_REQ_SENT)
272 queue_interrupt(fc, req);
273 }
274
275 if (req->force) {
276 spin_unlock(&fc->lock);
277 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
278 spin_lock(&fc->lock);
279 } else {
280 sigset_t oldset;
281
282 /* Only fatal signals may interrupt this */
283 block_sigs(&oldset);
284 wait_answer_interruptible(fc, req);
285 restore_sigs(&oldset);
257 } 286 }
287
288 if (req->aborted)
289 goto aborted;
290 if (req->state == FUSE_REQ_FINISHED)
291 return;
292
293 req->out.h.error = -EINTR;
294 req->aborted = 1;
295
296 aborted:
258 if (req->locked) { 297 if (req->locked) {
259 /* This is uninterruptible sleep, because data is 298 /* This is uninterruptible sleep, because data is
260 being copied to/from the buffers of req. During 299 being copied to/from the buffers of req. During
@@ -268,8 +307,11 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
268 if (req->state == FUSE_REQ_PENDING) { 307 if (req->state == FUSE_REQ_PENDING) {
269 list_del(&req->list); 308 list_del(&req->list);
270 __fuse_put_request(req); 309 __fuse_put_request(req);
271 } else if (req->state == FUSE_REQ_SENT) 310 } else if (req->state == FUSE_REQ_SENT) {
272 background_request(fc, req); 311 spin_unlock(&fc->lock);
312 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
313 spin_lock(&fc->lock);
314 }
273} 315}
274 316
275static unsigned len_args(unsigned numargs, struct fuse_arg *args) 317static unsigned len_args(unsigned numargs, struct fuse_arg *args)
@@ -283,13 +325,19 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args)
283 return nbytes; 325 return nbytes;
284} 326}
285 327
328static u64 fuse_get_unique(struct fuse_conn *fc)
329 {
330 fc->reqctr++;
331 /* zero is special */
332 if (fc->reqctr == 0)
333 fc->reqctr = 1;
334
335 return fc->reqctr;
336}
337
286static void queue_request(struct fuse_conn *fc, struct fuse_req *req) 338static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
287{ 339{
288 fc->reqctr++; 340 req->in.h.unique = fuse_get_unique(fc);
289 /* zero is special */
290 if (fc->reqctr == 0)
291 fc->reqctr = 1;
292 req->in.h.unique = fc->reqctr;
293 req->in.h.len = sizeof(struct fuse_in_header) + 341 req->in.h.len = sizeof(struct fuse_in_header) +
294 len_args(req->in.numargs, (struct fuse_arg *) req->in.args); 342 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
295 list_add_tail(&req->list, &fc->pending); 343 list_add_tail(&req->list, &fc->pending);
@@ -302,9 +350,6 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
302 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 350 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
303} 351}
304 352
305/*
306 * This can only be interrupted by a SIGKILL
307 */
308void request_send(struct fuse_conn *fc, struct fuse_req *req) 353void request_send(struct fuse_conn *fc, struct fuse_req *req)
309{ 354{
310 req->isreply = 1; 355 req->isreply = 1;
@@ -327,8 +372,12 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
327static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) 372static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
328{ 373{
329 spin_lock(&fc->lock); 374 spin_lock(&fc->lock);
330 background_request(fc, req);
331 if (fc->connected) { 375 if (fc->connected) {
376 req->background = 1;
377 fc->num_background++;
378 if (fc->num_background == FUSE_MAX_BACKGROUND)
379 fc->blocked = 1;
380
332 queue_request(fc, req); 381 queue_request(fc, req);
333 spin_unlock(&fc->lock); 382 spin_unlock(&fc->lock);
334 } else { 383 } else {
@@ -352,14 +401,14 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
352/* 401/*
353 * Lock the request. Up to the next unlock_request() there mustn't be 402 * Lock the request. Up to the next unlock_request() there mustn't be
354 * anything that could cause a page-fault. If the request was already 403 * anything that could cause a page-fault. If the request was already
355 * interrupted bail out. 404 * aborted bail out.
356 */ 405 */
357static int lock_request(struct fuse_conn *fc, struct fuse_req *req) 406static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
358{ 407{
359 int err = 0; 408 int err = 0;
360 if (req) { 409 if (req) {
361 spin_lock(&fc->lock); 410 spin_lock(&fc->lock);
362 if (req->interrupted) 411 if (req->aborted)
363 err = -ENOENT; 412 err = -ENOENT;
364 else 413 else
365 req->locked = 1; 414 req->locked = 1;
@@ -369,7 +418,7 @@ static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
369} 418}
370 419
371/* 420/*
372 * Unlock request. If it was interrupted during being locked, the 421 * Unlock request. If it was aborted during being locked, the
373 * requester thread is currently waiting for it to be unlocked, so 422 * requester thread is currently waiting for it to be unlocked, so
374 * wake it up. 423 * wake it up.
375 */ 424 */
@@ -378,7 +427,7 @@ static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
378 if (req) { 427 if (req) {
379 spin_lock(&fc->lock); 428 spin_lock(&fc->lock);
380 req->locked = 0; 429 req->locked = 0;
381 if (req->interrupted) 430 if (req->aborted)
382 wake_up(&req->waitq); 431 wake_up(&req->waitq);
383 spin_unlock(&fc->lock); 432 spin_unlock(&fc->lock);
384 } 433 }
@@ -557,13 +606,18 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
557 return err; 606 return err;
558} 607}
559 608
609static int request_pending(struct fuse_conn *fc)
610{
611 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
612}
613
560/* Wait until a request is available on the pending list */ 614/* Wait until a request is available on the pending list */
561static void request_wait(struct fuse_conn *fc) 615static void request_wait(struct fuse_conn *fc)
562{ 616{
563 DECLARE_WAITQUEUE(wait, current); 617 DECLARE_WAITQUEUE(wait, current);
564 618
565 add_wait_queue_exclusive(&fc->waitq, &wait); 619 add_wait_queue_exclusive(&fc->waitq, &wait);
566 while (fc->connected && list_empty(&fc->pending)) { 620 while (fc->connected && !request_pending(fc)) {
567 set_current_state(TASK_INTERRUPTIBLE); 621 set_current_state(TASK_INTERRUPTIBLE);
568 if (signal_pending(current)) 622 if (signal_pending(current))
569 break; 623 break;
@@ -577,11 +631,50 @@ static void request_wait(struct fuse_conn *fc)
577} 631}
578 632
579/* 633/*
634 * Transfer an interrupt request to userspace
635 *
636 * Unlike other requests this is assembled on demand, without a need
637 * to allocate a separate fuse_req structure.
638 *
639 * Called with fc->lock held, releases it
640 */
641static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
642 const struct iovec *iov, unsigned long nr_segs)
643{
644 struct fuse_copy_state cs;
645 struct fuse_in_header ih;
646 struct fuse_interrupt_in arg;
647 unsigned reqsize = sizeof(ih) + sizeof(arg);
648 int err;
649
650 list_del_init(&req->intr_entry);
651 req->intr_unique = fuse_get_unique(fc);
652 memset(&ih, 0, sizeof(ih));
653 memset(&arg, 0, sizeof(arg));
654 ih.len = reqsize;
655 ih.opcode = FUSE_INTERRUPT;
656 ih.unique = req->intr_unique;
657 arg.unique = req->in.h.unique;
658
659 spin_unlock(&fc->lock);
660 if (iov_length(iov, nr_segs) < reqsize)
661 return -EINVAL;
662
663 fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
664 err = fuse_copy_one(&cs, &ih, sizeof(ih));
665 if (!err)
666 err = fuse_copy_one(&cs, &arg, sizeof(arg));
667 fuse_copy_finish(&cs);
668
669 return err ? err : reqsize;
670}
671
672/*
580 * Read a single request into the userspace filesystem's buffer. This 673 * Read a single request into the userspace filesystem's buffer. This
581 * function waits until a request is available, then removes it from 674 * function waits until a request is available, then removes it from
582 * the pending list and copies request data to userspace buffer. If 675 * the pending list and copies request data to userspace buffer. If
583 * no reply is needed (FORGET) or request has been interrupted or 676 * no reply is needed (FORGET) or request has been aborted or there
584 * there was an error during the copying then it's finished by calling 677 * was an error during the copying then it's finished by calling
585 * request_end(). Otherwise add it to the processing list, and set 678 * request_end(). Otherwise add it to the processing list, and set
586 * the 'sent' flag. 679 * the 'sent' flag.
587 */ 680 */
@@ -601,7 +694,7 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
601 spin_lock(&fc->lock); 694 spin_lock(&fc->lock);
602 err = -EAGAIN; 695 err = -EAGAIN;
603 if ((file->f_flags & O_NONBLOCK) && fc->connected && 696 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
604 list_empty(&fc->pending)) 697 !request_pending(fc))
605 goto err_unlock; 698 goto err_unlock;
606 699
607 request_wait(fc); 700 request_wait(fc);
@@ -609,9 +702,15 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
609 if (!fc->connected) 702 if (!fc->connected)
610 goto err_unlock; 703 goto err_unlock;
611 err = -ERESTARTSYS; 704 err = -ERESTARTSYS;
612 if (list_empty(&fc->pending)) 705 if (!request_pending(fc))
613 goto err_unlock; 706 goto err_unlock;
614 707
708 if (!list_empty(&fc->interrupts)) {
709 req = list_entry(fc->interrupts.next, struct fuse_req,
710 intr_entry);
711 return fuse_read_interrupt(fc, req, iov, nr_segs);
712 }
713
615 req = list_entry(fc->pending.next, struct fuse_req, list); 714 req = list_entry(fc->pending.next, struct fuse_req, list);
616 req->state = FUSE_REQ_READING; 715 req->state = FUSE_REQ_READING;
617 list_move(&req->list, &fc->io); 716 list_move(&req->list, &fc->io);
@@ -636,10 +735,10 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
636 fuse_copy_finish(&cs); 735 fuse_copy_finish(&cs);
637 spin_lock(&fc->lock); 736 spin_lock(&fc->lock);
638 req->locked = 0; 737 req->locked = 0;
639 if (!err && req->interrupted) 738 if (!err && req->aborted)
640 err = -ENOENT; 739 err = -ENOENT;
641 if (err) { 740 if (err) {
642 if (!req->interrupted) 741 if (!req->aborted)
643 req->out.h.error = -EIO; 742 req->out.h.error = -EIO;
644 request_end(fc, req); 743 request_end(fc, req);
645 return err; 744 return err;
@@ -649,6 +748,8 @@ static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
649 else { 748 else {
650 req->state = FUSE_REQ_SENT; 749 req->state = FUSE_REQ_SENT;
651 list_move_tail(&req->list, &fc->processing); 750 list_move_tail(&req->list, &fc->processing);
751 if (req->interrupted)
752 queue_interrupt(fc, req);
652 spin_unlock(&fc->lock); 753 spin_unlock(&fc->lock);
653 } 754 }
654 return reqsize; 755 return reqsize;
@@ -675,7 +776,7 @@ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
675 list_for_each(entry, &fc->processing) { 776 list_for_each(entry, &fc->processing) {
676 struct fuse_req *req; 777 struct fuse_req *req;
677 req = list_entry(entry, struct fuse_req, list); 778 req = list_entry(entry, struct fuse_req, list);
678 if (req->in.h.unique == unique) 779 if (req->in.h.unique == unique || req->intr_unique == unique)
679 return req; 780 return req;
680 } 781 }
681 return NULL; 782 return NULL;
@@ -741,17 +842,33 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
741 goto err_unlock; 842 goto err_unlock;
742 843
743 req = request_find(fc, oh.unique); 844 req = request_find(fc, oh.unique);
744 err = -EINVAL;
745 if (!req) 845 if (!req)
746 goto err_unlock; 846 goto err_unlock;
747 847
748 if (req->interrupted) { 848 if (req->aborted) {
749 spin_unlock(&fc->lock); 849 spin_unlock(&fc->lock);
750 fuse_copy_finish(&cs); 850 fuse_copy_finish(&cs);
751 spin_lock(&fc->lock); 851 spin_lock(&fc->lock);
752 request_end(fc, req); 852 request_end(fc, req);
753 return -ENOENT; 853 return -ENOENT;
754 } 854 }
855 /* Is it an interrupt reply? */
856 if (req->intr_unique == oh.unique) {
857 err = -EINVAL;
858 if (nbytes != sizeof(struct fuse_out_header))
859 goto err_unlock;
860
861 if (oh.error == -ENOSYS)
862 fc->no_interrupt = 1;
863 else if (oh.error == -EAGAIN)
864 queue_interrupt(fc, req);
865
866 spin_unlock(&fc->lock);
867 fuse_copy_finish(&cs);
868 return nbytes;
869 }
870
871 req->state = FUSE_REQ_WRITING;
755 list_move(&req->list, &fc->io); 872 list_move(&req->list, &fc->io);
756 req->out.h = oh; 873 req->out.h = oh;
757 req->locked = 1; 874 req->locked = 1;
@@ -764,9 +881,9 @@ static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
764 spin_lock(&fc->lock); 881 spin_lock(&fc->lock);
765 req->locked = 0; 882 req->locked = 0;
766 if (!err) { 883 if (!err) {
767 if (req->interrupted) 884 if (req->aborted)
768 err = -ENOENT; 885 err = -ENOENT;
769 } else if (!req->interrupted) 886 } else if (!req->aborted)
770 req->out.h.error = -EIO; 887 req->out.h.error = -EIO;
771 request_end(fc, req); 888 request_end(fc, req);
772 889
@@ -800,7 +917,7 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
800 spin_lock(&fc->lock); 917 spin_lock(&fc->lock);
801 if (!fc->connected) 918 if (!fc->connected)
802 mask = POLLERR; 919 mask = POLLERR;
803 else if (!list_empty(&fc->pending)) 920 else if (request_pending(fc))
804 mask |= POLLIN | POLLRDNORM; 921 mask |= POLLIN | POLLRDNORM;
805 spin_unlock(&fc->lock); 922 spin_unlock(&fc->lock);
806 923
@@ -826,7 +943,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
826/* 943/*
827 * Abort requests under I/O 944 * Abort requests under I/O
828 * 945 *
829 * The requests are set to interrupted and finished, and the request 946 * The requests are set to aborted and finished, and the request
830 * waiter is woken up. This will make request_wait_answer() wait 947 * waiter is woken up. This will make request_wait_answer() wait
831 * until the request is unlocked and then return. 948 * until the request is unlocked and then return.
832 * 949 *
@@ -841,7 +958,7 @@ static void end_io_requests(struct fuse_conn *fc)
841 list_entry(fc->io.next, struct fuse_req, list); 958 list_entry(fc->io.next, struct fuse_req, list);
842 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; 959 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
843 960
844 req->interrupted = 1; 961 req->aborted = 1;
845 req->out.h.error = -ECONNABORTED; 962 req->out.h.error = -ECONNABORTED;
846 req->state = FUSE_REQ_FINISHED; 963 req->state = FUSE_REQ_FINISHED;
847 list_del_init(&req->list); 964 list_del_init(&req->list);
@@ -874,19 +991,20 @@ static void end_io_requests(struct fuse_conn *fc)
874 * onto the pending list is prevented by req->connected being false. 991 * onto the pending list is prevented by req->connected being false.
875 * 992 *
876 * Progression of requests under I/O to the processing list is 993 * Progression of requests under I/O to the processing list is
877 * prevented by the req->interrupted flag being true for these 994 * prevented by the req->aborted flag being true for these requests.
878 * requests. For this reason requests on the io list must be aborted 995 * For this reason requests on the io list must be aborted first.
879 * first.
880 */ 996 */
881void fuse_abort_conn(struct fuse_conn *fc) 997void fuse_abort_conn(struct fuse_conn *fc)
882{ 998{
883 spin_lock(&fc->lock); 999 spin_lock(&fc->lock);
884 if (fc->connected) { 1000 if (fc->connected) {
885 fc->connected = 0; 1001 fc->connected = 0;
1002 fc->blocked = 0;
886 end_io_requests(fc); 1003 end_io_requests(fc);
887 end_requests(fc, &fc->pending); 1004 end_requests(fc, &fc->pending);
888 end_requests(fc, &fc->processing); 1005 end_requests(fc, &fc->processing);
889 wake_up_all(&fc->waitq); 1006 wake_up_all(&fc->waitq);
1007 wake_up_all(&fc->blocked_waitq);
890 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 1008 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
891 } 1009 }
892 spin_unlock(&fc->lock); 1010 spin_unlock(&fc->lock);
@@ -902,7 +1020,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
902 end_requests(fc, &fc->processing); 1020 end_requests(fc, &fc->processing);
903 spin_unlock(&fc->lock); 1021 spin_unlock(&fc->lock);
904 fasync_helper(-1, file, 0, &fc->fasync); 1022 fasync_helper(-1, file, 0, &fc->fasync);
905 kobject_put(&fc->kobj); 1023 fuse_conn_put(fc);
906 } 1024 }
907 1025
908 return 0; 1026 return 0;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8d7546e832e8..72a74cde6de8 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1,6 +1,6 @@
1/* 1/*
2 FUSE: Filesystem in Userspace 2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu> 3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4 4
5 This program can be distributed under the terms of the GNU GPL. 5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING. 6 See the file COPYING.
@@ -79,7 +79,6 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
79{ 79{
80 req->in.h.opcode = FUSE_LOOKUP; 80 req->in.h.opcode = FUSE_LOOKUP;
81 req->in.h.nodeid = get_node_id(dir); 81 req->in.h.nodeid = get_node_id(dir);
82 req->inode = dir;
83 req->in.numargs = 1; 82 req->in.numargs = 1;
84 req->in.args[0].size = entry->d_name.len + 1; 83 req->in.args[0].size = entry->d_name.len + 1;
85 req->in.args[0].value = entry->d_name.name; 84 req->in.args[0].value = entry->d_name.name;
@@ -225,6 +224,20 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
225} 224}
226 225
227/* 226/*
227 * Synchronous release for the case when something goes wrong in CREATE_OPEN
228 */
229static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
230 u64 nodeid, int flags)
231{
232 struct fuse_req *req;
233
234 req = fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
235 req->force = 1;
236 request_send(fc, req);
237 fuse_put_request(fc, req);
238}
239
240/*
228 * Atomic create+open operation 241 * Atomic create+open operation
229 * 242 *
230 * If the filesystem doesn't support this, then fall back to separate 243 * If the filesystem doesn't support this, then fall back to separate
@@ -237,6 +250,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
237 struct inode *inode; 250 struct inode *inode;
238 struct fuse_conn *fc = get_fuse_conn(dir); 251 struct fuse_conn *fc = get_fuse_conn(dir);
239 struct fuse_req *req; 252 struct fuse_req *req;
253 struct fuse_req *forget_req;
240 struct fuse_open_in inarg; 254 struct fuse_open_in inarg;
241 struct fuse_open_out outopen; 255 struct fuse_open_out outopen;
242 struct fuse_entry_out outentry; 256 struct fuse_entry_out outentry;
@@ -247,9 +261,14 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
247 if (fc->no_create) 261 if (fc->no_create)
248 return -ENOSYS; 262 return -ENOSYS;
249 263
264 forget_req = fuse_get_req(fc);
265 if (IS_ERR(forget_req))
266 return PTR_ERR(forget_req);
267
250 req = fuse_get_req(fc); 268 req = fuse_get_req(fc);
269 err = PTR_ERR(req);
251 if (IS_ERR(req)) 270 if (IS_ERR(req))
252 return PTR_ERR(req); 271 goto out_put_forget_req;
253 272
254 err = -ENOMEM; 273 err = -ENOMEM;
255 ff = fuse_file_alloc(); 274 ff = fuse_file_alloc();
@@ -262,7 +281,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
262 inarg.mode = mode; 281 inarg.mode = mode;
263 req->in.h.opcode = FUSE_CREATE; 282 req->in.h.opcode = FUSE_CREATE;
264 req->in.h.nodeid = get_node_id(dir); 283 req->in.h.nodeid = get_node_id(dir);
265 req->inode = dir;
266 req->in.numargs = 2; 284 req->in.numargs = 2;
267 req->in.args[0].size = sizeof(inarg); 285 req->in.args[0].size = sizeof(inarg);
268 req->in.args[0].value = &inarg; 286 req->in.args[0].value = &inarg;
@@ -285,25 +303,23 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
285 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid)) 303 if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
286 goto out_free_ff; 304 goto out_free_ff;
287 305
306 fuse_put_request(fc, req);
288 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, 307 inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
289 &outentry.attr); 308 &outentry.attr);
290 err = -ENOMEM;
291 if (!inode) { 309 if (!inode) {
292 flags &= ~(O_CREAT | O_EXCL | O_TRUNC); 310 flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
293 ff->fh = outopen.fh; 311 ff->fh = outopen.fh;
294 /* Special release, with inode = NULL, this will 312 fuse_sync_release(fc, ff, outentry.nodeid, flags);
295 trigger a 'forget' request when the release is 313 fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
296 complete */ 314 return -ENOMEM;
297 fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
298 goto out_put_request;
299 } 315 }
300 fuse_put_request(fc, req); 316 fuse_put_request(fc, forget_req);
301 d_instantiate(entry, inode); 317 d_instantiate(entry, inode);
302 fuse_change_timeout(entry, &outentry); 318 fuse_change_timeout(entry, &outentry);
303 file = lookup_instantiate_filp(nd, entry, generic_file_open); 319 file = lookup_instantiate_filp(nd, entry, generic_file_open);
304 if (IS_ERR(file)) { 320 if (IS_ERR(file)) {
305 ff->fh = outopen.fh; 321 ff->fh = outopen.fh;
306 fuse_send_release(fc, ff, outentry.nodeid, inode, flags, 0); 322 fuse_sync_release(fc, ff, outentry.nodeid, flags);
307 return PTR_ERR(file); 323 return PTR_ERR(file);
308 } 324 }
309 fuse_finish_open(inode, file, ff, &outopen); 325 fuse_finish_open(inode, file, ff, &outopen);
@@ -313,6 +329,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
313 fuse_file_free(ff); 329 fuse_file_free(ff);
314 out_put_request: 330 out_put_request:
315 fuse_put_request(fc, req); 331 fuse_put_request(fc, req);
332 out_put_forget_req:
333 fuse_put_request(fc, forget_req);
316 return err; 334 return err;
317} 335}
318 336
@@ -328,7 +346,6 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
328 int err; 346 int err;
329 347
330 req->in.h.nodeid = get_node_id(dir); 348 req->in.h.nodeid = get_node_id(dir);
331 req->inode = dir;
332 req->out.numargs = 1; 349 req->out.numargs = 1;
333 req->out.args[0].size = sizeof(outarg); 350 req->out.args[0].size = sizeof(outarg);
334 req->out.args[0].value = &outarg; 351 req->out.args[0].value = &outarg;
@@ -448,7 +465,6 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
448 465
449 req->in.h.opcode = FUSE_UNLINK; 466 req->in.h.opcode = FUSE_UNLINK;
450 req->in.h.nodeid = get_node_id(dir); 467 req->in.h.nodeid = get_node_id(dir);
451 req->inode = dir;
452 req->in.numargs = 1; 468 req->in.numargs = 1;
453 req->in.args[0].size = entry->d_name.len + 1; 469 req->in.args[0].size = entry->d_name.len + 1;
454 req->in.args[0].value = entry->d_name.name; 470 req->in.args[0].value = entry->d_name.name;
@@ -480,7 +496,6 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
480 496
481 req->in.h.opcode = FUSE_RMDIR; 497 req->in.h.opcode = FUSE_RMDIR;
482 req->in.h.nodeid = get_node_id(dir); 498 req->in.h.nodeid = get_node_id(dir);
483 req->inode = dir;
484 req->in.numargs = 1; 499 req->in.numargs = 1;
485 req->in.args[0].size = entry->d_name.len + 1; 500 req->in.args[0].size = entry->d_name.len + 1;
486 req->in.args[0].value = entry->d_name.name; 501 req->in.args[0].value = entry->d_name.name;
@@ -510,8 +525,6 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
510 inarg.newdir = get_node_id(newdir); 525 inarg.newdir = get_node_id(newdir);
511 req->in.h.opcode = FUSE_RENAME; 526 req->in.h.opcode = FUSE_RENAME;
512 req->in.h.nodeid = get_node_id(olddir); 527 req->in.h.nodeid = get_node_id(olddir);
513 req->inode = olddir;
514 req->inode2 = newdir;
515 req->in.numargs = 3; 528 req->in.numargs = 3;
516 req->in.args[0].size = sizeof(inarg); 529 req->in.args[0].size = sizeof(inarg);
517 req->in.args[0].value = &inarg; 530 req->in.args[0].value = &inarg;
@@ -558,7 +571,6 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
558 memset(&inarg, 0, sizeof(inarg)); 571 memset(&inarg, 0, sizeof(inarg));
559 inarg.oldnodeid = get_node_id(inode); 572 inarg.oldnodeid = get_node_id(inode);
560 req->in.h.opcode = FUSE_LINK; 573 req->in.h.opcode = FUSE_LINK;
561 req->inode2 = inode;
562 req->in.numargs = 2; 574 req->in.numargs = 2;
563 req->in.args[0].size = sizeof(inarg); 575 req->in.args[0].size = sizeof(inarg);
564 req->in.args[0].value = &inarg; 576 req->in.args[0].value = &inarg;
@@ -587,7 +599,6 @@ int fuse_do_getattr(struct inode *inode)
587 599
588 req->in.h.opcode = FUSE_GETATTR; 600 req->in.h.opcode = FUSE_GETATTR;
589 req->in.h.nodeid = get_node_id(inode); 601 req->in.h.nodeid = get_node_id(inode);
590 req->inode = inode;
591 req->out.numargs = 1; 602 req->out.numargs = 1;
592 req->out.args[0].size = sizeof(arg); 603 req->out.args[0].size = sizeof(arg);
593 req->out.args[0].value = &arg; 604 req->out.args[0].value = &arg;
@@ -679,7 +690,6 @@ static int fuse_access(struct inode *inode, int mask)
679 inarg.mask = mask; 690 inarg.mask = mask;
680 req->in.h.opcode = FUSE_ACCESS; 691 req->in.h.opcode = FUSE_ACCESS;
681 req->in.h.nodeid = get_node_id(inode); 692 req->in.h.nodeid = get_node_id(inode);
682 req->inode = inode;
683 req->in.numargs = 1; 693 req->in.numargs = 1;
684 req->in.args[0].size = sizeof(inarg); 694 req->in.args[0].size = sizeof(inarg);
685 req->in.args[0].value = &inarg; 695 req->in.args[0].value = &inarg;
@@ -820,7 +830,6 @@ static char *read_link(struct dentry *dentry)
820 } 830 }
821 req->in.h.opcode = FUSE_READLINK; 831 req->in.h.opcode = FUSE_READLINK;
822 req->in.h.nodeid = get_node_id(inode); 832 req->in.h.nodeid = get_node_id(inode);
823 req->inode = inode;
824 req->out.argvar = 1; 833 req->out.argvar = 1;
825 req->out.numargs = 1; 834 req->out.numargs = 1;
826 req->out.args[0].size = PAGE_SIZE - 1; 835 req->out.args[0].size = PAGE_SIZE - 1;
@@ -939,7 +948,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
939 iattr_to_fattr(attr, &inarg); 948 iattr_to_fattr(attr, &inarg);
940 req->in.h.opcode = FUSE_SETATTR; 949 req->in.h.opcode = FUSE_SETATTR;
941 req->in.h.nodeid = get_node_id(inode); 950 req->in.h.nodeid = get_node_id(inode);
942 req->inode = inode;
943 req->in.numargs = 1; 951 req->in.numargs = 1;
944 req->in.args[0].size = sizeof(inarg); 952 req->in.args[0].size = sizeof(inarg);
945 req->in.args[0].value = &inarg; 953 req->in.args[0].value = &inarg;
@@ -1002,7 +1010,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
1002 inarg.flags = flags; 1010 inarg.flags = flags;
1003 req->in.h.opcode = FUSE_SETXATTR; 1011 req->in.h.opcode = FUSE_SETXATTR;
1004 req->in.h.nodeid = get_node_id(inode); 1012 req->in.h.nodeid = get_node_id(inode);
1005 req->inode = inode;
1006 req->in.numargs = 3; 1013 req->in.numargs = 3;
1007 req->in.args[0].size = sizeof(inarg); 1014 req->in.args[0].size = sizeof(inarg);
1008 req->in.args[0].value = &inarg; 1015 req->in.args[0].value = &inarg;
@@ -1041,7 +1048,6 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
1041 inarg.size = size; 1048 inarg.size = size;
1042 req->in.h.opcode = FUSE_GETXATTR; 1049 req->in.h.opcode = FUSE_GETXATTR;
1043 req->in.h.nodeid = get_node_id(inode); 1050 req->in.h.nodeid = get_node_id(inode);
1044 req->inode = inode;
1045 req->in.numargs = 2; 1051 req->in.numargs = 2;
1046 req->in.args[0].size = sizeof(inarg); 1052 req->in.args[0].size = sizeof(inarg);
1047 req->in.args[0].value = &inarg; 1053 req->in.args[0].value = &inarg;
@@ -1091,7 +1097,6 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
1091 inarg.size = size; 1097 inarg.size = size;
1092 req->in.h.opcode = FUSE_LISTXATTR; 1098 req->in.h.opcode = FUSE_LISTXATTR;
1093 req->in.h.nodeid = get_node_id(inode); 1099 req->in.h.nodeid = get_node_id(inode);
1094 req->inode = inode;
1095 req->in.numargs = 1; 1100 req->in.numargs = 1;
1096 req->in.args[0].size = sizeof(inarg); 1101 req->in.args[0].size = sizeof(inarg);
1097 req->in.args[0].value = &inarg; 1102 req->in.args[0].value = &inarg;
@@ -1135,7 +1140,6 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
1135 1140
1136 req->in.h.opcode = FUSE_REMOVEXATTR; 1141 req->in.h.opcode = FUSE_REMOVEXATTR;
1137 req->in.h.nodeid = get_node_id(inode); 1142 req->in.h.nodeid = get_node_id(inode);
1138 req->inode = inode;
1139 req->in.numargs = 1; 1143 req->in.numargs = 1;
1140 req->in.args[0].size = strlen(name) + 1; 1144 req->in.args[0].size = strlen(name) + 1;
1141 req->in.args[0].value = name; 1145 req->in.args[0].value = name;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 087f3b734f40..28aa81eae2cc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -30,7 +30,6 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
30 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); 30 inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
31 req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; 31 req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
32 req->in.h.nodeid = get_node_id(inode); 32 req->in.h.nodeid = get_node_id(inode);
33 req->inode = inode;
34 req->in.numargs = 1; 33 req->in.numargs = 1;
35 req->in.args[0].size = sizeof(inarg); 34 req->in.args[0].size = sizeof(inarg);
36 req->in.args[0].value = &inarg; 35 req->in.args[0].value = &inarg;
@@ -49,8 +48,8 @@ struct fuse_file *fuse_file_alloc(void)
49 struct fuse_file *ff; 48 struct fuse_file *ff;
50 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); 49 ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
51 if (ff) { 50 if (ff) {
52 ff->release_req = fuse_request_alloc(); 51 ff->reserved_req = fuse_request_alloc();
53 if (!ff->release_req) { 52 if (!ff->reserved_req) {
54 kfree(ff); 53 kfree(ff);
55 ff = NULL; 54 ff = NULL;
56 } 55 }
@@ -60,7 +59,7 @@ struct fuse_file *fuse_file_alloc(void)
60 59
61void fuse_file_free(struct fuse_file *ff) 60void fuse_file_free(struct fuse_file *ff)
62{ 61{
63 fuse_request_free(ff->release_req); 62 fuse_request_free(ff->reserved_req);
64 kfree(ff); 63 kfree(ff);
65} 64}
66 65
@@ -113,37 +112,22 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
113 return err; 112 return err;
114} 113}
115 114
116/* Special case for failed iget in CREATE */ 115struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
117static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 116 int opcode)
118{ 117{
119 /* If called from end_io_requests(), req has more than one 118 struct fuse_req *req = ff->reserved_req;
120 reference and fuse_reset_request() cannot work */
121 if (fc->connected) {
122 u64 nodeid = req->in.h.nodeid;
123 fuse_reset_request(req);
124 fuse_send_forget(fc, req, nodeid, 1);
125 } else
126 fuse_put_request(fc, req);
127}
128
129void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
130 u64 nodeid, struct inode *inode, int flags, int isdir)
131{
132 struct fuse_req * req = ff->release_req;
133 struct fuse_release_in *inarg = &req->misc.release_in; 119 struct fuse_release_in *inarg = &req->misc.release_in;
134 120
135 inarg->fh = ff->fh; 121 inarg->fh = ff->fh;
136 inarg->flags = flags; 122 inarg->flags = flags;
137 req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; 123 req->in.h.opcode = opcode;
138 req->in.h.nodeid = nodeid; 124 req->in.h.nodeid = nodeid;
139 req->inode = inode;
140 req->in.numargs = 1; 125 req->in.numargs = 1;
141 req->in.args[0].size = sizeof(struct fuse_release_in); 126 req->in.args[0].size = sizeof(struct fuse_release_in);
142 req->in.args[0].value = inarg; 127 req->in.args[0].value = inarg;
143 request_send_background(fc, req);
144 if (!inode)
145 req->end = fuse_release_end;
146 kfree(ff); 128 kfree(ff);
129
130 return req;
147} 131}
148 132
149int fuse_release_common(struct inode *inode, struct file *file, int isdir) 133int fuse_release_common(struct inode *inode, struct file *file, int isdir)
@@ -151,8 +135,15 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
151 struct fuse_file *ff = file->private_data; 135 struct fuse_file *ff = file->private_data;
152 if (ff) { 136 if (ff) {
153 struct fuse_conn *fc = get_fuse_conn(inode); 137 struct fuse_conn *fc = get_fuse_conn(inode);
154 u64 nodeid = get_node_id(inode); 138 struct fuse_req *req;
155 fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir); 139
140 req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
141 isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
142
143 /* Hold vfsmount and dentry until release is finished */
144 req->vfsmount = mntget(file->f_vfsmnt);
145 req->dentry = dget(file->f_dentry);
146 request_send_background(fc, req);
156 } 147 }
157 148
158 /* Return value is ignored by VFS */ 149 /* Return value is ignored by VFS */
@@ -169,6 +160,28 @@ static int fuse_release(struct inode *inode, struct file *file)
169 return fuse_release_common(inode, file, 0); 160 return fuse_release_common(inode, file, 0);
170} 161}
171 162
163/*
164 * Scramble the ID space with XTEA, so that the value of the files_struct
165 * pointer is not exposed to userspace.
166 */
167static u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
168{
169 u32 *k = fc->scramble_key;
170 u64 v = (unsigned long) id;
171 u32 v0 = v;
172 u32 v1 = v >> 32;
173 u32 sum = 0;
174 int i;
175
176 for (i = 0; i < 32; i++) {
177 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
178 sum += 0x9E3779B9;
179 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
180 }
181
182 return (u64) v0 + ((u64) v1 << 32);
183}
184
172static int fuse_flush(struct file *file, fl_owner_t id) 185static int fuse_flush(struct file *file, fl_owner_t id)
173{ 186{
174 struct inode *inode = file->f_dentry->d_inode; 187 struct inode *inode = file->f_dentry->d_inode;
@@ -184,19 +197,16 @@ static int fuse_flush(struct file *file, fl_owner_t id)
184 if (fc->no_flush) 197 if (fc->no_flush)
185 return 0; 198 return 0;
186 199
187 req = fuse_get_req(fc); 200 req = fuse_get_req_nofail(fc, file);
188 if (IS_ERR(req))
189 return PTR_ERR(req);
190
191 memset(&inarg, 0, sizeof(inarg)); 201 memset(&inarg, 0, sizeof(inarg));
192 inarg.fh = ff->fh; 202 inarg.fh = ff->fh;
203 inarg.lock_owner = fuse_lock_owner_id(fc, id);
193 req->in.h.opcode = FUSE_FLUSH; 204 req->in.h.opcode = FUSE_FLUSH;
194 req->in.h.nodeid = get_node_id(inode); 205 req->in.h.nodeid = get_node_id(inode);
195 req->inode = inode;
196 req->file = file;
197 req->in.numargs = 1; 206 req->in.numargs = 1;
198 req->in.args[0].size = sizeof(inarg); 207 req->in.args[0].size = sizeof(inarg);
199 req->in.args[0].value = &inarg; 208 req->in.args[0].value = &inarg;
209 req->force = 1;
200 request_send(fc, req); 210 request_send(fc, req);
201 err = req->out.h.error; 211 err = req->out.h.error;
202 fuse_put_request(fc, req); 212 fuse_put_request(fc, req);
@@ -232,8 +242,6 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
232 inarg.fsync_flags = datasync ? 1 : 0; 242 inarg.fsync_flags = datasync ? 1 : 0;
233 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; 243 req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
234 req->in.h.nodeid = get_node_id(inode); 244 req->in.h.nodeid = get_node_id(inode);
235 req->inode = inode;
236 req->file = file;
237 req->in.numargs = 1; 245 req->in.numargs = 1;
238 req->in.args[0].size = sizeof(inarg); 246 req->in.args[0].size = sizeof(inarg);
239 req->in.args[0].value = &inarg; 247 req->in.args[0].value = &inarg;
@@ -266,8 +274,6 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
266 inarg->size = count; 274 inarg->size = count;
267 req->in.h.opcode = opcode; 275 req->in.h.opcode = opcode;
268 req->in.h.nodeid = get_node_id(inode); 276 req->in.h.nodeid = get_node_id(inode);
269 req->inode = inode;
270 req->file = file;
271 req->in.numargs = 1; 277 req->in.numargs = 1;
272 req->in.args[0].size = sizeof(struct fuse_read_in); 278 req->in.args[0].size = sizeof(struct fuse_read_in);
273 req->in.args[0].value = inarg; 279 req->in.args[0].value = inarg;
@@ -342,6 +348,8 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
342 req->out.page_zeroing = 1; 348 req->out.page_zeroing = 1;
343 fuse_read_fill(req, file, inode, pos, count, FUSE_READ); 349 fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
344 if (fc->async_read) { 350 if (fc->async_read) {
351 get_file(file);
352 req->file = file;
345 req->end = fuse_readpages_end; 353 req->end = fuse_readpages_end;
346 request_send_background(fc, req); 354 request_send_background(fc, req);
347 } else { 355 } else {
@@ -420,8 +428,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
420 inarg.size = count; 428 inarg.size = count;
421 req->in.h.opcode = FUSE_WRITE; 429 req->in.h.opcode = FUSE_WRITE;
422 req->in.h.nodeid = get_node_id(inode); 430 req->in.h.nodeid = get_node_id(inode);
423 req->inode = inode;
424 req->file = file;
425 req->in.argpages = 1; 431 req->in.argpages = 1;
426 req->in.numargs = 2; 432 req->in.numargs = 2;
427 req->in.args[0].size = sizeof(struct fuse_write_in); 433 req->in.args[0].size = sizeof(struct fuse_write_in);
@@ -619,6 +625,126 @@ static int fuse_set_page_dirty(struct page *page)
619 return 0; 625 return 0;
620} 626}
621 627
628static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
629 struct file_lock *fl)
630{
631 switch (ffl->type) {
632 case F_UNLCK:
633 break;
634
635 case F_RDLCK:
636 case F_WRLCK:
637 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
638 ffl->end < ffl->start)
639 return -EIO;
640
641 fl->fl_start = ffl->start;
642 fl->fl_end = ffl->end;
643 fl->fl_pid = ffl->pid;
644 break;
645
646 default:
647 return -EIO;
648 }
649 fl->fl_type = ffl->type;
650 return 0;
651}
652
653static void fuse_lk_fill(struct fuse_req *req, struct file *file,
654 const struct file_lock *fl, int opcode, pid_t pid)
655{
656 struct inode *inode = file->f_dentry->d_inode;
657 struct fuse_conn *fc = get_fuse_conn(inode);
658 struct fuse_file *ff = file->private_data;
659 struct fuse_lk_in *arg = &req->misc.lk_in;
660
661 arg->fh = ff->fh;
662 arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
663 arg->lk.start = fl->fl_start;
664 arg->lk.end = fl->fl_end;
665 arg->lk.type = fl->fl_type;
666 arg->lk.pid = pid;
667 req->in.h.opcode = opcode;
668 req->in.h.nodeid = get_node_id(inode);
669 req->in.numargs = 1;
670 req->in.args[0].size = sizeof(*arg);
671 req->in.args[0].value = arg;
672}
673
674static int fuse_getlk(struct file *file, struct file_lock *fl)
675{
676 struct inode *inode = file->f_dentry->d_inode;
677 struct fuse_conn *fc = get_fuse_conn(inode);
678 struct fuse_req *req;
679 struct fuse_lk_out outarg;
680 int err;
681
682 req = fuse_get_req(fc);
683 if (IS_ERR(req))
684 return PTR_ERR(req);
685
686 fuse_lk_fill(req, file, fl, FUSE_GETLK, 0);
687 req->out.numargs = 1;
688 req->out.args[0].size = sizeof(outarg);
689 req->out.args[0].value = &outarg;
690 request_send(fc, req);
691 err = req->out.h.error;
692 fuse_put_request(fc, req);
693 if (!err)
694 err = convert_fuse_file_lock(&outarg.lk, fl);
695
696 return err;
697}
698
699static int fuse_setlk(struct file *file, struct file_lock *fl)
700{
701 struct inode *inode = file->f_dentry->d_inode;
702 struct fuse_conn *fc = get_fuse_conn(inode);
703 struct fuse_req *req;
704 int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
705 pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
706 int err;
707
708 /* Unlock on close is handled by the flush method */
709 if (fl->fl_flags & FL_CLOSE)
710 return 0;
711
712 req = fuse_get_req(fc);
713 if (IS_ERR(req))
714 return PTR_ERR(req);
715
716 fuse_lk_fill(req, file, fl, opcode, pid);
717 request_send(fc, req);
718 err = req->out.h.error;
719 /* locking is restartable */
720 if (err == -EINTR)
721 err = -ERESTARTSYS;
722 fuse_put_request(fc, req);
723 return err;
724}
725
726static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
727{
728 struct inode *inode = file->f_dentry->d_inode;
729 struct fuse_conn *fc = get_fuse_conn(inode);
730 int err;
731
732 if (cmd == F_GETLK) {
733 if (fc->no_lock) {
734 if (!posix_test_lock(file, fl, fl))
735 fl->fl_type = F_UNLCK;
736 err = 0;
737 } else
738 err = fuse_getlk(file, fl);
739 } else {
740 if (fc->no_lock)
741 err = posix_lock_file_wait(file, fl);
742 else
743 err = fuse_setlk(file, fl);
744 }
745 return err;
746}
747
622static const struct file_operations fuse_file_operations = { 748static const struct file_operations fuse_file_operations = {
623 .llseek = generic_file_llseek, 749 .llseek = generic_file_llseek,
624 .read = generic_file_read, 750 .read = generic_file_read,
@@ -628,6 +754,7 @@ static const struct file_operations fuse_file_operations = {
628 .flush = fuse_flush, 754 .flush = fuse_flush,
629 .release = fuse_release, 755 .release = fuse_release,
630 .fsync = fuse_fsync, 756 .fsync = fuse_fsync,
757 .lock = fuse_file_lock,
631 .sendfile = generic_file_sendfile, 758 .sendfile = generic_file_sendfile,
632}; 759};
633 760
@@ -639,6 +766,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
639 .flush = fuse_flush, 766 .flush = fuse_flush,
640 .release = fuse_release, 767 .release = fuse_release,
641 .fsync = fuse_fsync, 768 .fsync = fuse_fsync,
769 .lock = fuse_file_lock,
642 /* no mmap and sendfile */ 770 /* no mmap and sendfile */
643}; 771};
644 772
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0474202cb5dc..0dbf96621841 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -8,12 +8,13 @@
8 8
9#include <linux/fuse.h> 9#include <linux/fuse.h>
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/mount.h>
11#include <linux/wait.h> 12#include <linux/wait.h>
12#include <linux/list.h> 13#include <linux/list.h>
13#include <linux/spinlock.h> 14#include <linux/spinlock.h>
14#include <linux/mm.h> 15#include <linux/mm.h>
15#include <linux/backing-dev.h> 16#include <linux/backing-dev.h>
16#include <asm/semaphore.h> 17#include <linux/mutex.h>
17 18
18/** Max number of pages that can be used in a single read request */ 19/** Max number of pages that can be used in a single read request */
19#define FUSE_MAX_PAGES_PER_REQ 32 20#define FUSE_MAX_PAGES_PER_REQ 32
@@ -24,6 +25,9 @@
24/** It could be as large as PATH_MAX, but would that have any uses? */ 25/** It could be as large as PATH_MAX, but would that have any uses? */
25#define FUSE_NAME_MAX 1024 26#define FUSE_NAME_MAX 1024
26 27
28/** Number of dentries for each connection in the control filesystem */
29#define FUSE_CTL_NUM_DENTRIES 3
30
27/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem 31/** If the FUSE_DEFAULT_PERMISSIONS flag is given, the filesystem
28 module will check permissions based on the file mode. Otherwise no 32 module will check permissions based on the file mode. Otherwise no
29 permission checking is done in the kernel */ 33 permission checking is done in the kernel */
@@ -33,6 +37,11 @@
33 doing the mount will be allowed to access the filesystem */ 37 doing the mount will be allowed to access the filesystem */
34#define FUSE_ALLOW_OTHER (1 << 1) 38#define FUSE_ALLOW_OTHER (1 << 1)
35 39
40/** List of active connections */
41extern struct list_head fuse_conn_list;
42
43/** Global mutex protecting fuse_conn_list and the control filesystem */
44extern struct mutex fuse_mutex;
36 45
37/** FUSE inode */ 46/** FUSE inode */
38struct fuse_inode { 47struct fuse_inode {
@@ -56,7 +65,7 @@ struct fuse_inode {
56/** FUSE specific file data */ 65/** FUSE specific file data */
57struct fuse_file { 66struct fuse_file {
58 /** Request reserved for flush and release */ 67 /** Request reserved for flush and release */
59 struct fuse_req *release_req; 68 struct fuse_req *reserved_req;
60 69
61 /** File handle used by userspace */ 70 /** File handle used by userspace */
62 u64 fh; 71 u64 fh;
@@ -122,6 +131,7 @@ enum fuse_req_state {
122 FUSE_REQ_PENDING, 131 FUSE_REQ_PENDING,
123 FUSE_REQ_READING, 132 FUSE_REQ_READING,
124 FUSE_REQ_SENT, 133 FUSE_REQ_SENT,
134 FUSE_REQ_WRITING,
125 FUSE_REQ_FINISHED 135 FUSE_REQ_FINISHED
126}; 136};
127 137
@@ -135,12 +145,15 @@ struct fuse_req {
135 fuse_conn */ 145 fuse_conn */
136 struct list_head list; 146 struct list_head list;
137 147
138 /** Entry on the background list */ 148 /** Entry on the interrupts list */
139 struct list_head bg_entry; 149 struct list_head intr_entry;
140 150
141 /** refcount */ 151 /** refcount */
142 atomic_t count; 152 atomic_t count;
143 153
154 /** Unique ID for the interrupt request */
155 u64 intr_unique;
156
144 /* 157 /*
145 * The following bitfields are either set once before the 158 * The following bitfields are either set once before the
146 * request is queued or setting/clearing them is protected by 159 * request is queued or setting/clearing them is protected by
@@ -150,12 +163,18 @@ struct fuse_req {
150 /** True if the request has reply */ 163 /** True if the request has reply */
151 unsigned isreply:1; 164 unsigned isreply:1;
152 165
153 /** The request was interrupted */ 166 /** Force sending of the request even if interrupted */
154 unsigned interrupted:1; 167 unsigned force:1;
168
169 /** The request was aborted */
170 unsigned aborted:1;
155 171
156 /** Request is sent in the background */ 172 /** Request is sent in the background */
157 unsigned background:1; 173 unsigned background:1;
158 174
175 /** The request has been interrupted */
176 unsigned interrupted:1;
177
159 /** Data is being copied to/from the request */ 178 /** Data is being copied to/from the request */
160 unsigned locked:1; 179 unsigned locked:1;
161 180
@@ -181,6 +200,7 @@ struct fuse_req {
181 struct fuse_init_in init_in; 200 struct fuse_init_in init_in;
182 struct fuse_init_out init_out; 201 struct fuse_init_out init_out;
183 struct fuse_read_in read_in; 202 struct fuse_read_in read_in;
203 struct fuse_lk_in lk_in;
184 } misc; 204 } misc;
185 205
186 /** page vector */ 206 /** page vector */
@@ -192,17 +212,20 @@ struct fuse_req {
192 /** offset of data on first page */ 212 /** offset of data on first page */
193 unsigned page_offset; 213 unsigned page_offset;
194 214
195 /** Inode used in the request */
196 struct inode *inode;
197
198 /** Second inode used in the request (or NULL) */
199 struct inode *inode2;
200
201 /** File used in the request (or NULL) */ 215 /** File used in the request (or NULL) */
202 struct file *file; 216 struct file *file;
203 217
218 /** vfsmount used in release */
219 struct vfsmount *vfsmount;
220
221 /** dentry used in release */
222 struct dentry *dentry;
223
204 /** Request completion callback */ 224 /** Request completion callback */
205 void (*end)(struct fuse_conn *, struct fuse_req *); 225 void (*end)(struct fuse_conn *, struct fuse_req *);
226
227 /** Request is stolen from fuse_file->reserved_req */
228 struct file *stolen_file;
206}; 229};
207 230
208/** 231/**
@@ -216,6 +239,9 @@ struct fuse_conn {
 216 /** Lock protecting accesses to members of this structure */ 239 /** Lock protecting accesses to members of this structure */
217 spinlock_t lock; 240 spinlock_t lock;
218 241
242 /** Refcount */
243 atomic_t count;
244
219 /** The user id for this mount */ 245 /** The user id for this mount */
220 uid_t user_id; 246 uid_t user_id;
221 247
@@ -243,13 +269,12 @@ struct fuse_conn {
243 /** The list of requests under I/O */ 269 /** The list of requests under I/O */
244 struct list_head io; 270 struct list_head io;
245 271
246 /** Requests put in the background (RELEASE or any other
247 interrupted request) */
248 struct list_head background;
249
250 /** Number of requests currently in the background */ 272 /** Number of requests currently in the background */
251 unsigned num_background; 273 unsigned num_background;
252 274
275 /** Pending interrupts */
276 struct list_head interrupts;
277
253 /** Flag indicating if connection is blocked. This will be 278 /** Flag indicating if connection is blocked. This will be
254 the case before the INIT reply is received, and if there 279 the case before the INIT reply is received, and if there
 255 are too many outstanding background requests */ 280 are too many outstanding background requests */
@@ -258,15 +283,9 @@ struct fuse_conn {
258 /** waitq for blocked connection */ 283 /** waitq for blocked connection */
259 wait_queue_head_t blocked_waitq; 284 wait_queue_head_t blocked_waitq;
260 285
261 /** RW semaphore for exclusion with fuse_put_super() */
262 struct rw_semaphore sbput_sem;
263
264 /** The next unique request id */ 286 /** The next unique request id */
265 u64 reqctr; 287 u64 reqctr;
266 288
267 /** Mount is active */
268 unsigned mounted;
269
270 /** Connection established, cleared on umount, connection 289 /** Connection established, cleared on umount, connection
271 abort and device release */ 290 abort and device release */
272 unsigned connected; 291 unsigned connected;
@@ -305,12 +324,18 @@ struct fuse_conn {
305 /** Is removexattr not implemented by fs? */ 324 /** Is removexattr not implemented by fs? */
306 unsigned no_removexattr : 1; 325 unsigned no_removexattr : 1;
307 326
327 /** Are file locking primitives not implemented by fs? */
328 unsigned no_lock : 1;
329
308 /** Is access not implemented by fs? */ 330 /** Is access not implemented by fs? */
309 unsigned no_access : 1; 331 unsigned no_access : 1;
310 332
311 /** Is create not implemented by fs? */ 333 /** Is create not implemented by fs? */
312 unsigned no_create : 1; 334 unsigned no_create : 1;
313 335
336 /** Is interrupt not implemented by fs? */
337 unsigned no_interrupt : 1;
338
314 /** The number of requests waiting for completion */ 339 /** The number of requests waiting for completion */
315 atomic_t num_waiting; 340 atomic_t num_waiting;
316 341
@@ -320,11 +345,23 @@ struct fuse_conn {
320 /** Backing dev info */ 345 /** Backing dev info */
321 struct backing_dev_info bdi; 346 struct backing_dev_info bdi;
322 347
323 /** kobject */ 348 /** Entry on the fuse_conn_list */
324 struct kobject kobj; 349 struct list_head entry;
350
351 /** Unique ID */
352 u64 id;
353
354 /** Dentries in the control filesystem */
355 struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];
356
357 /** number of dentries used in the above array */
358 int ctl_ndents;
325 359
326 /** O_ASYNC requests */ 360 /** O_ASYNC requests */
327 struct fasync_struct *fasync; 361 struct fasync_struct *fasync;
362
363 /** Key for lock owner ID scrambling */
364 u32 scramble_key[4];
328}; 365};
329 366
330static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) 367static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@@ -337,11 +374,6 @@ static inline struct fuse_conn *get_fuse_conn(struct inode *inode)
337 return get_fuse_conn_super(inode->i_sb); 374 return get_fuse_conn_super(inode->i_sb);
338} 375}
339 376
340static inline struct fuse_conn *get_fuse_conn_kobj(struct kobject *obj)
341{
342 return container_of(obj, struct fuse_conn, kobj);
343}
344
345static inline struct fuse_inode *get_fuse_inode(struct inode *inode) 377static inline struct fuse_inode *get_fuse_inode(struct inode *inode)
346{ 378{
347 return container_of(inode, struct fuse_inode, inode); 379 return container_of(inode, struct fuse_inode, inode);
@@ -383,12 +415,9 @@ void fuse_file_free(struct fuse_file *ff);
383void fuse_finish_open(struct inode *inode, struct file *file, 415void fuse_finish_open(struct inode *inode, struct file *file,
384 struct fuse_file *ff, struct fuse_open_out *outarg); 416 struct fuse_file *ff, struct fuse_open_out *outarg);
385 417
386/** 418/** */
387 * Send a RELEASE request 419struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
388 */ 420 int opcode);
389void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
390 u64 nodeid, struct inode *inode, int flags, int isdir);
391
392/** 421/**
393 * Send RELEASE or RELEASEDIR request 422 * Send RELEASE or RELEASEDIR request
394 */ 423 */
@@ -435,6 +464,9 @@ int fuse_dev_init(void);
435 */ 464 */
436void fuse_dev_cleanup(void); 465void fuse_dev_cleanup(void);
437 466
467int fuse_ctl_init(void);
468void fuse_ctl_cleanup(void);
469
438/** 470/**
439 * Allocate a request 471 * Allocate a request
440 */ 472 */
@@ -446,14 +478,14 @@ struct fuse_req *fuse_request_alloc(void);
446void fuse_request_free(struct fuse_req *req); 478void fuse_request_free(struct fuse_req *req);
447 479
448/** 480/**
449 * Reinitialize a request, the preallocated flag is left unmodified 481 * Get a request, may fail with -ENOMEM
450 */ 482 */
451void fuse_reset_request(struct fuse_req *req); 483struct fuse_req *fuse_get_req(struct fuse_conn *fc);
452 484
453/** 485/**
454 * Reserve a preallocated request 486 * Gets a requests for a file operation, always succeeds
455 */ 487 */
456struct fuse_req *fuse_get_req(struct fuse_conn *fc); 488struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file);
457 489
458/** 490/**
459 * Decrement reference count of a request. If count goes to zero free 491 * Decrement reference count of a request. If count goes to zero free
@@ -476,11 +508,6 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
476 */ 508 */
477void request_send_background(struct fuse_conn *fc, struct fuse_req *req); 509void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
478 510
479/**
480 * Release inodes and file associated with background request
481 */
482void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req);
483
484/* Abort all requests */ 511/* Abort all requests */
485void fuse_abort_conn(struct fuse_conn *fc); 512void fuse_abort_conn(struct fuse_conn *fc);
486 513
@@ -493,3 +520,23 @@ int fuse_do_getattr(struct inode *inode);
493 * Invalidate inode attributes 520 * Invalidate inode attributes
494 */ 521 */
495void fuse_invalidate_attr(struct inode *inode); 522void fuse_invalidate_attr(struct inode *inode);
523
524/**
525 * Acquire reference to fuse_conn
526 */
527struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
528
529/**
530 * Release reference to fuse_conn
531 */
532void fuse_conn_put(struct fuse_conn *fc);
533
534/**
535 * Add connection to control filesystem
536 */
537int fuse_ctl_add_conn(struct fuse_conn *fc);
538
539/**
540 * Remove connection from control filesystem
541 */
542void fuse_ctl_remove_conn(struct fuse_conn *fc);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 815c824f4fc8..dcaaabd3b9c4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -11,25 +11,20 @@
11#include <linux/pagemap.h> 11#include <linux/pagemap.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/mount.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/parser.h> 17#include <linux/parser.h>
19#include <linux/statfs.h> 18#include <linux/statfs.h>
19#include <linux/random.h>
20 20
21MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); 21MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
22MODULE_DESCRIPTION("Filesystem in Userspace"); 22MODULE_DESCRIPTION("Filesystem in Userspace");
23MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
24 24
25static kmem_cache_t *fuse_inode_cachep; 25static kmem_cache_t *fuse_inode_cachep;
26static struct subsystem connections_subsys; 26struct list_head fuse_conn_list;
27 27DEFINE_MUTEX(fuse_mutex);
28struct fuse_conn_attr {
29 struct attribute attr;
30 ssize_t (*show)(struct fuse_conn *, char *);
31 ssize_t (*store)(struct fuse_conn *, const char *, size_t);
32};
33 28
34#define FUSE_SUPER_MAGIC 0x65735546 29#define FUSE_SUPER_MAGIC 0x65735546
35 30
@@ -104,6 +99,14 @@ static void fuse_clear_inode(struct inode *inode)
104 } 99 }
105} 100}
106 101
102static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
103{
104 if (*flags & MS_MANDLOCK)
105 return -EINVAL;
106
107 return 0;
108}
109
107void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr) 110void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
108{ 111{
109 if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size) 112 if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
@@ -205,22 +208,19 @@ static void fuse_put_super(struct super_block *sb)
205{ 208{
206 struct fuse_conn *fc = get_fuse_conn_super(sb); 209 struct fuse_conn *fc = get_fuse_conn_super(sb);
207 210
208 down_write(&fc->sbput_sem);
209 while (!list_empty(&fc->background))
210 fuse_release_background(fc,
211 list_entry(fc->background.next,
212 struct fuse_req, bg_entry));
213
214 spin_lock(&fc->lock); 211 spin_lock(&fc->lock);
215 fc->mounted = 0;
216 fc->connected = 0; 212 fc->connected = 0;
213 fc->blocked = 0;
217 spin_unlock(&fc->lock); 214 spin_unlock(&fc->lock);
218 up_write(&fc->sbput_sem);
219 /* Flush all readers on this fs */ 215 /* Flush all readers on this fs */
220 kill_fasync(&fc->fasync, SIGIO, POLL_IN); 216 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
221 wake_up_all(&fc->waitq); 217 wake_up_all(&fc->waitq);
222 kobject_del(&fc->kobj); 218 wake_up_all(&fc->blocked_waitq);
223 kobject_put(&fc->kobj); 219 mutex_lock(&fuse_mutex);
220 list_del(&fc->entry);
221 fuse_ctl_remove_conn(fc);
222 mutex_unlock(&fuse_mutex);
223 fuse_conn_put(fc);
224} 224}
225 225
226static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr) 226static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
@@ -370,11 +370,6 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
370 return 0; 370 return 0;
371} 371}
372 372
373static void fuse_conn_release(struct kobject *kobj)
374{
375 kfree(get_fuse_conn_kobj(kobj));
376}
377
378static struct fuse_conn *new_conn(void) 373static struct fuse_conn *new_conn(void)
379{ 374{
380 struct fuse_conn *fc; 375 struct fuse_conn *fc;
@@ -382,24 +377,35 @@ static struct fuse_conn *new_conn(void)
382 fc = kzalloc(sizeof(*fc), GFP_KERNEL); 377 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
383 if (fc) { 378 if (fc) {
384 spin_lock_init(&fc->lock); 379 spin_lock_init(&fc->lock);
380 atomic_set(&fc->count, 1);
385 init_waitqueue_head(&fc->waitq); 381 init_waitqueue_head(&fc->waitq);
386 init_waitqueue_head(&fc->blocked_waitq); 382 init_waitqueue_head(&fc->blocked_waitq);
387 INIT_LIST_HEAD(&fc->pending); 383 INIT_LIST_HEAD(&fc->pending);
388 INIT_LIST_HEAD(&fc->processing); 384 INIT_LIST_HEAD(&fc->processing);
389 INIT_LIST_HEAD(&fc->io); 385 INIT_LIST_HEAD(&fc->io);
390 INIT_LIST_HEAD(&fc->background); 386 INIT_LIST_HEAD(&fc->interrupts);
391 init_rwsem(&fc->sbput_sem);
392 kobj_set_kset_s(fc, connections_subsys);
393 kobject_init(&fc->kobj);
394 atomic_set(&fc->num_waiting, 0); 387 atomic_set(&fc->num_waiting, 0);
395 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 388 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
396 fc->bdi.unplug_io_fn = default_unplug_io_fn; 389 fc->bdi.unplug_io_fn = default_unplug_io_fn;
397 fc->reqctr = 0; 390 fc->reqctr = 0;
398 fc->blocked = 1; 391 fc->blocked = 1;
392 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
399 } 393 }
400 return fc; 394 return fc;
401} 395}
402 396
397void fuse_conn_put(struct fuse_conn *fc)
398{
399 if (atomic_dec_and_test(&fc->count))
400 kfree(fc);
401}
402
403struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
404{
405 atomic_inc(&fc->count);
406 return fc;
407}
408
403static struct inode *get_root_inode(struct super_block *sb, unsigned mode) 409static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
404{ 410{
405 struct fuse_attr attr; 411 struct fuse_attr attr;
@@ -415,6 +421,7 @@ static struct super_operations fuse_super_operations = {
415 .destroy_inode = fuse_destroy_inode, 421 .destroy_inode = fuse_destroy_inode,
416 .read_inode = fuse_read_inode, 422 .read_inode = fuse_read_inode,
417 .clear_inode = fuse_clear_inode, 423 .clear_inode = fuse_clear_inode,
424 .remount_fs = fuse_remount_fs,
418 .put_super = fuse_put_super, 425 .put_super = fuse_put_super,
419 .umount_begin = fuse_umount_begin, 426 .umount_begin = fuse_umount_begin,
420 .statfs = fuse_statfs, 427 .statfs = fuse_statfs,
@@ -434,8 +441,12 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
434 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; 441 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
435 if (arg->flags & FUSE_ASYNC_READ) 442 if (arg->flags & FUSE_ASYNC_READ)
436 fc->async_read = 1; 443 fc->async_read = 1;
437 } else 444 if (!(arg->flags & FUSE_POSIX_LOCKS))
445 fc->no_lock = 1;
446 } else {
438 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 447 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
448 fc->no_lock = 1;
449 }
439 450
440 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); 451 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
441 fc->minor = arg->minor; 452 fc->minor = arg->minor;
@@ -453,7 +464,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
453 arg->major = FUSE_KERNEL_VERSION; 464 arg->major = FUSE_KERNEL_VERSION;
454 arg->minor = FUSE_KERNEL_MINOR_VERSION; 465 arg->minor = FUSE_KERNEL_MINOR_VERSION;
455 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; 466 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
456 arg->flags |= FUSE_ASYNC_READ; 467 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS;
457 req->in.h.opcode = FUSE_INIT; 468 req->in.h.opcode = FUSE_INIT;
458 req->in.numargs = 1; 469 req->in.numargs = 1;
459 req->in.args[0].size = sizeof(*arg); 470 req->in.args[0].size = sizeof(*arg);
@@ -469,10 +480,9 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
469 request_send_background(fc, req); 480 request_send_background(fc, req);
470} 481}
471 482
472static unsigned long long conn_id(void) 483static u64 conn_id(void)
473{ 484{
474 /* BKL is held for ->get_sb() */ 485 static u64 ctr = 1;
475 static unsigned long long ctr = 1;
476 return ctr++; 486 return ctr++;
477} 487}
478 488
@@ -486,6 +496,9 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
486 struct fuse_req *init_req; 496 struct fuse_req *init_req;
487 int err; 497 int err;
488 498
499 if (sb->s_flags & MS_MANDLOCK)
500 return -EINVAL;
501
489 if (!parse_fuse_opt((char *) data, &d)) 502 if (!parse_fuse_opt((char *) data, &d))
490 return -EINVAL; 503 return -EINVAL;
491 504
@@ -529,25 +542,21 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
529 if (!init_req) 542 if (!init_req)
530 goto err_put_root; 543 goto err_put_root;
531 544
532 err = kobject_set_name(&fc->kobj, "%llu", conn_id()); 545 mutex_lock(&fuse_mutex);
533 if (err)
534 goto err_free_req;
535
536 err = kobject_add(&fc->kobj);
537 if (err)
538 goto err_free_req;
539
540 /* Setting file->private_data can't race with other mount()
541 instances, since BKL is held for ->get_sb() */
542 err = -EINVAL; 546 err = -EINVAL;
543 if (file->private_data) 547 if (file->private_data)
544 goto err_kobject_del; 548 goto err_unlock;
545 549
550 fc->id = conn_id();
551 err = fuse_ctl_add_conn(fc);
552 if (err)
553 goto err_unlock;
554
555 list_add_tail(&fc->entry, &fuse_conn_list);
546 sb->s_root = root_dentry; 556 sb->s_root = root_dentry;
547 fc->mounted = 1;
548 fc->connected = 1; 557 fc->connected = 1;
549 kobject_get(&fc->kobj); 558 file->private_data = fuse_conn_get(fc);
550 file->private_data = fc; 559 mutex_unlock(&fuse_mutex);
551 /* 560 /*
552 * atomic_dec_and_test() in fput() provides the necessary 561 * atomic_dec_and_test() in fput() provides the necessary
553 * memory barrier for file->private_data to be visible on all 562 * memory barrier for file->private_data to be visible on all
@@ -559,15 +568,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
559 568
560 return 0; 569 return 0;
561 570
562 err_kobject_del: 571 err_unlock:
563 kobject_del(&fc->kobj); 572 mutex_unlock(&fuse_mutex);
564 err_free_req:
565 fuse_request_free(init_req); 573 fuse_request_free(init_req);
566 err_put_root: 574 err_put_root:
567 dput(root_dentry); 575 dput(root_dentry);
568 err: 576 err:
569 fput(file); 577 fput(file);
570 kobject_put(&fc->kobj); 578 fuse_conn_put(fc);
571 return err; 579 return err;
572} 580}
573 581
@@ -585,68 +593,8 @@ static struct file_system_type fuse_fs_type = {
585 .kill_sb = kill_anon_super, 593 .kill_sb = kill_anon_super,
586}; 594};
587 595
588static ssize_t fuse_conn_waiting_show(struct fuse_conn *fc, char *page)
589{
590 return sprintf(page, "%i\n", atomic_read(&fc->num_waiting));
591}
592
593static ssize_t fuse_conn_abort_store(struct fuse_conn *fc, const char *page,
594 size_t count)
595{
596 fuse_abort_conn(fc);
597 return count;
598}
599
600static struct fuse_conn_attr fuse_conn_waiting =
601 __ATTR(waiting, 0400, fuse_conn_waiting_show, NULL);
602static struct fuse_conn_attr fuse_conn_abort =
603 __ATTR(abort, 0600, NULL, fuse_conn_abort_store);
604
605static struct attribute *fuse_conn_attrs[] = {
606 &fuse_conn_waiting.attr,
607 &fuse_conn_abort.attr,
608 NULL,
609};
610
611static ssize_t fuse_conn_attr_show(struct kobject *kobj,
612 struct attribute *attr,
613 char *page)
614{
615 struct fuse_conn_attr *fca =
616 container_of(attr, struct fuse_conn_attr, attr);
617
618 if (fca->show)
619 return fca->show(get_fuse_conn_kobj(kobj), page);
620 else
621 return -EACCES;
622}
623
624static ssize_t fuse_conn_attr_store(struct kobject *kobj,
625 struct attribute *attr,
626 const char *page, size_t count)
627{
628 struct fuse_conn_attr *fca =
629 container_of(attr, struct fuse_conn_attr, attr);
630
631 if (fca->store)
632 return fca->store(get_fuse_conn_kobj(kobj), page, count);
633 else
634 return -EACCES;
635}
636
637static struct sysfs_ops fuse_conn_sysfs_ops = {
638 .show = &fuse_conn_attr_show,
639 .store = &fuse_conn_attr_store,
640};
641
642static struct kobj_type ktype_fuse_conn = {
643 .release = fuse_conn_release,
644 .sysfs_ops = &fuse_conn_sysfs_ops,
645 .default_attrs = fuse_conn_attrs,
646};
647
648static decl_subsys(fuse, NULL, NULL); 596static decl_subsys(fuse, NULL, NULL);
649static decl_subsys(connections, &ktype_fuse_conn, NULL); 597static decl_subsys(connections, NULL, NULL);
650 598
651static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep, 599static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
652 unsigned long flags) 600 unsigned long flags)
@@ -720,6 +668,7 @@ static int __init fuse_init(void)
720 printk("fuse init (API version %i.%i)\n", 668 printk("fuse init (API version %i.%i)\n",
721 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION); 669 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
722 670
671 INIT_LIST_HEAD(&fuse_conn_list);
723 res = fuse_fs_init(); 672 res = fuse_fs_init();
724 if (res) 673 if (res)
725 goto err; 674 goto err;
@@ -732,8 +681,14 @@ static int __init fuse_init(void)
732 if (res) 681 if (res)
733 goto err_dev_cleanup; 682 goto err_dev_cleanup;
734 683
684 res = fuse_ctl_init();
685 if (res)
686 goto err_sysfs_cleanup;
687
735 return 0; 688 return 0;
736 689
690 err_sysfs_cleanup:
691 fuse_sysfs_cleanup();
737 err_dev_cleanup: 692 err_dev_cleanup:
738 fuse_dev_cleanup(); 693 fuse_dev_cleanup();
739 err_fs_cleanup: 694 err_fs_cleanup:
@@ -746,6 +701,7 @@ static void __exit fuse_exit(void)
746{ 701{
747 printk(KERN_DEBUG "fuse exit\n"); 702 printk(KERN_DEBUG "fuse exit\n");
748 703
704 fuse_ctl_cleanup();
749 fuse_sysfs_cleanup(); 705 fuse_sysfs_cleanup();
750 fuse_fs_cleanup(); 706 fuse_fs_cleanup();
751 fuse_dev_cleanup(); 707 fuse_dev_cleanup();