 Documentation/filesystems/fuse.txt |  40
 fs/fuse/dev.c                      | 157
 fs/fuse/dir.c                      |  56
 fs/fuse/file.c                     |  49
 fs/fuse/fuse_i.h                   |  49
 fs/fuse/inode.c                    |  14
 6 files changed, 112 insertions(+), 253 deletions(-)
diff --git a/Documentation/filesystems/fuse.txt b/Documentation/filesystems/fuse.txt
index 33f74310d161..e7747774ceb9 100644
--- a/Documentation/filesystems/fuse.txt
+++ b/Documentation/filesystems/fuse.txt
@@ -304,25 +304,7 @@ Scenario 1 - Simple deadlock
  |                                    |     for "file"]
  |                                    |    *DEADLOCK*
 
-The solution for this is to allow requests to be interrupted while
-they are in userspace:
-
- |      [interrupted by signal]       |
- |    <fuse_unlink()                  |
- |    [release semaphore]             |  [semaphore acquired]
- |  <sys_unlink()                     |
- |                                    |    >fuse_unlink()
- |                                    |      [queue req on fc->pending]
- |                                    |      [wake up fc->waitq]
- |                                    |      [sleep on req->waitq]
-
-If the filesystem daemon was single threaded, this will stop here,
-since there's no other thread to dequeue and execute the request.
-In this case the solution is to kill the FUSE daemon as well. If
-there are multiple serving threads, you just have to kill them as
-long as any remain.
-
-Moral: a filesystem which deadlocks, can soon find itself dead.
+The solution for this is to allow the filesystem to be aborted.
 
 Scenario 2 - Tricky deadlock
 ----------------------------
@@ -355,24 +337,14 @@ but is caused by a pagefault.
  |                                    |  [lock page]
  |                                    |  * DEADLOCK *
 
-Solution is again to let the the request be interrupted (not
-elaborated further).
+Solution is basically the same as above.
 
 An additional problem is that while the write buffer is being
 copied to the request, the request must not be interrupted. This
 is because the destination address of the copy may not be valid
 after the request is interrupted.
 
-This is solved with doing the copy atomically, and allowing
-interruption while the page(s) belonging to the write buffer are
-faulted with get_user_pages(). The 'req->locked' flag indicates
-when the copy is taking place, and interruption is delayed until
-this flag is unset.
-
-Scenario 3 - Tricky deadlock with asynchronous read
----------------------------------------------------
-
-The same situation as above, except thread-1 will wait on page lock
-and hence it will be uninterruptible as well. The solution is to
-abort the connection with forced umount (if mount is attached) or
-through the abort attribute in sysfs.
+This is solved with doing the copy atomically, and allowing abort
+while the page(s) belonging to the write buffer are faulted with
+get_user_pages(). The 'req->locked' flag indicates when the copy is
+taking place, and abort is delayed until this flag is unset.
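
To connect this with the dev.c changes below: the "abort is delayed until
req->locked is unset" rule can be pictured with the sketch that follows.
The helper name and its exact shape are illustrative only and are not
quoted from fs/fuse/dev.c; it merely shows the wait on req->locked that
the text above describes.

/* Illustrative sketch, not part of the patch: an abort path must not
 * complete a request while the atomic copy of the write buffer is in
 * progress, i.e. while req->locked is set. */
static void end_aborted_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->out.h.error = -ECONNABORTED;
	req->state = FUSE_REQ_FINISHED;
	wake_up(&req->waitq);
	/* delay completion until the copy done under req->locked finishes */
	wait_event(req->waitq, !req->locked);
	if (req->end)
		req->end(fc, req);
}
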
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 104a62dadb94..fec4779e2b55 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -64,18 +64,6 @@ static void restore_sigs(sigset_t *oldset)
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
-/*
- * Reset request, so that it can be reused
- *
- * The caller must be _very_ careful to make sure, that it is holding
- * the only reference to req
- */
-void fuse_reset_request(struct fuse_req *req)
-{
-	BUG_ON(atomic_read(&req->count) != 1);
-	fuse_request_init(req);
-}
-
 static void __fuse_get_request(struct fuse_req *req)
 {
 	atomic_inc(&req->count);
@@ -103,6 +91,10 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 	if (intr)
 		goto out;
 
+	err = -ENOTCONN;
+	if (!fc->connected)
+		goto out;
+
 	req = fuse_request_alloc();
 	err = -ENOMEM;
 	if (!req)
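
The new -ENOTCONN test sits just after fuse_get_req() has waited on
fc->blocked_waitq. That earlier, unchanged part of the function is not in
the hunk; it presumably looks roughly like the sketch below, which is also
why the abort and umount hunks later in this patch clear fc->blocked and
wake blocked_waitq, so such waiters cannot hang on a dead connection.

	/* Reconstructed for context only -- assumed shape of the unchanged
	 * top of fuse_get_req(), not part of this patch. */
	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;
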
@@ -129,113 +121,38 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 /*
- * Called with sbput_sem held for read (request_end) or write
- * (fuse_put_super). By the time fuse_put_super() is finished, all
- * inodes belonging to background requests must be released, so the
- * iputs have to be done within the locked region.
- */
-void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
-{
-	iput(req->inode);
-	iput(req->inode2);
-	spin_lock(&fc->lock);
-	list_del(&req->bg_entry);
-	if (fc->num_background == FUSE_MAX_BACKGROUND) {
-		fc->blocked = 0;
-		wake_up_all(&fc->blocked_waitq);
-	}
-	fc->num_background--;
-	spin_unlock(&fc->lock);
-}
-
-/*
  * This function is called when a request is finished. Either a reply
  * has arrived or it was interrupted (and not yet sent) or some error
  * occurred during communication with userspace, or the device file
- * was closed. In case of a background request the reference to the
- * stored objects are released. The requester thread is woken up (if
- * still waiting), the 'end' callback is called if given, else the
- * reference to the request is released
- *
- * Releasing extra reference for foreground requests must be done
- * within the same locked region as setting state to finished. This
- * is because fuse_reset_request() may be called after request is
- * finished and it must be the sole possessor. If request is
- * interrupted and put in the background, it will return with an error
- * and hence never be reset and reused.
+ * was closed. The requester thread is woken up (if still waiting),
+ * the 'end' callback is called if given, else the reference to the
+ * request is released
 *
 * Called with fc->lock, unlocks it
 */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
+	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+	req->end = NULL;
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
-	if (!req->background) {
-		spin_unlock(&fc->lock);
-		wake_up(&req->waitq);
-		fuse_put_request(fc, req);
-	} else {
-		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-		req->end = NULL;
-		spin_unlock(&fc->lock);
-		down_read(&fc->sbput_sem);
-		if (fc->mounted)
-			fuse_release_background(fc, req);
-		up_read(&fc->sbput_sem);
-
-		/* fput must go outside sbput_sem, otherwise it can deadlock */
-		if (req->file)
-			fput(req->file);
-
-		if (end)
-			end(fc, req);
-		else
-			fuse_put_request(fc, req);
+	if (req->background) {
+		if (fc->num_background == FUSE_MAX_BACKGROUND) {
+			fc->blocked = 0;
+			wake_up_all(&fc->blocked_waitq);
+		}
+		fc->num_background--;
 	}
-}
-
-/*
- * Unfortunately request interruption not just solves the deadlock
- * problem, it causes problems too. These stem from the fact, that an
- * interrupted request is continued to be processed in userspace,
- * while all the locks and object references (inode and file) held
- * during the operation are released.
- *
- * To release the locks is exactly why there's a need to interrupt the
- * request, so there's not a lot that can be done about this, except
- * introduce additional locking in userspace.
- *
- * More important is to keep inode and file references until userspace
- * has replied, otherwise FORGET and RELEASE could be sent while the
- * inode/file is still used by the filesystem.
- *
- * For this reason the concept of "background" request is introduced.
- * An interrupted request is backgrounded if it has been already sent
- * to userspace. Backgrounding involves getting an extra reference to
- * inode(s) or file used in the request, and adding the request to
- * fc->background list. When a reply is received for a background
- * request, the object references are released, and the request is
- * removed from the list. If the filesystem is unmounted while there
- * are still background requests, the list is walked and references
- * are released as if a reply was received.
- *
- * There's one more use for a background request. The RELEASE message is
- * always sent as background, since it doesn't return an error or
- * data.
- */
-static void background_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->background = 1;
-	list_add(&req->bg_entry, &fc->background);
-	fc->num_background++;
-	if (fc->num_background == FUSE_MAX_BACKGROUND)
-		fc->blocked = 1;
-	if (req->inode)
-		req->inode = igrab(req->inode);
-	if (req->inode2)
-		req->inode2 = igrab(req->inode2);
 	if (req->file)
-		get_file(req->file);
+		fput(req->file);
+	spin_unlock(&fc->lock);
+	dput(req->dentry);
+	mntput(req->vfsmount);
+	wake_up(&req->waitq);
+	if (end)
+		end(fc, req);
+	else
+		fuse_put_request(fc, req);
 }
 
 /* Called with fc->lock held. Releases, and then reacquires it. */
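
Pieced together from the new-side lines of the hunk above, request_end()
after this patch reads as follows; the two inline comments are added here
for orientation and are not in the patch:

static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		/* accounting replaces the old fc->background list */
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	/* references taken at send time are dropped only now */
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
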
@@ -244,9 +161,14 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 	sigset_t oldset;
 
 	spin_unlock(&fc->lock);
-	block_sigs(&oldset);
-	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
-	restore_sigs(&oldset);
+	if (req->force)
+		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+	else {
+		block_sigs(&oldset);
+		wait_event_interruptible(req->waitq,
+					 req->state == FUSE_REQ_FINISHED);
+		restore_sigs(&oldset);
+	}
 	spin_lock(&fc->lock);
 	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
@@ -268,8 +190,11 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
 		__fuse_put_request(req);
-	} else if (req->state == FUSE_REQ_SENT)
-		background_request(fc, req);
+	} else if (req->state == FUSE_REQ_SENT) {
+		spin_unlock(&fc->lock);
+		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+		spin_lock(&fc->lock);
+	}
 }
 
 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
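
Assembled from the two hunks above, request_wait_answer() now waits as
sketched below; the unchanged middle of the function (the interruption
bookkeeping) is elided:

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	sigset_t oldset;

	spin_unlock(&fc->lock);
	if (req->force)
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	else {
		block_sigs(&oldset);
		wait_event_interruptible(req->waitq,
					 req->state == FUSE_REQ_FINISHED);
		restore_sigs(&oldset);
	}
	spin_lock(&fc->lock);
	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
		return;

	/* ... unchanged interruption handling, not shown in the hunks ... */

	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		/* already read by the daemon: wait uninterruptibly for the
		   reply instead of backgrounding the request */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}
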
@@ -327,8 +252,12 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
 	spin_lock(&fc->lock);
-	background_request(fc, req);
 	if (fc->connected) {
+		req->background = 1;
+		fc->num_background++;
+		if (fc->num_background == FUSE_MAX_BACKGROUND)
+			fc->blocked = 1;
+
 		queue_request(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
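
The connected path of request_send_nowait() thus takes over the accounting
that background_request() used to do. Assembled from the hunk above; the
disconnected branch is unchanged and not shown in the hunk, so it is only
hinted at here:

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		/* account the request and throttle new ones at the limit */
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		/* not connected: complete the request with an error
		   (unchanged branch, elided) */
	}
}
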
@@ -883,10 +812,12 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	spin_lock(&fc->lock);
 	if (fc->connected) {
 		fc->connected = 0;
+		fc->blocked = 0;
 		end_io_requests(fc);
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
 		wake_up_all(&fc->waitq);
+		wake_up_all(&fc->blocked_waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	}
 	spin_unlock(&fc->lock);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 8d7546e832e8..72a74cde6de8 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1,6 +1,6 @@
 /*
   FUSE: Filesystem in Userspace
-  Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
+  Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
 
   This program can be distributed under the terms of the GNU GPL.
   See the file COPYING.
@@ -79,7 +79,6 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
 {
 	req->in.h.opcode = FUSE_LOOKUP;
 	req->in.h.nodeid = get_node_id(dir);
-	req->inode = dir;
 	req->in.numargs = 1;
 	req->in.args[0].size = entry->d_name.len + 1;
 	req->in.args[0].value = entry->d_name.name;
@@ -225,6 +224,20 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
 }
 
 /*
+ * Synchronous release for the case when something goes wrong in CREATE_OPEN
+ */
+static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
+			      u64 nodeid, int flags)
+{
+	struct fuse_req *req;
+
+	req = fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
+	req->force = 1;
+	request_send(fc, req);
+	fuse_put_request(fc, req);
+}
+
+/*
  * Atomic create+open operation
 *
 * If the filesystem doesn't support this, then fall back to separate
@@ -237,6 +250,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	struct inode *inode;
 	struct fuse_conn *fc = get_fuse_conn(dir);
 	struct fuse_req *req;
+	struct fuse_req *forget_req;
 	struct fuse_open_in inarg;
 	struct fuse_open_out outopen;
 	struct fuse_entry_out outentry;
@@ -247,9 +261,14 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (fc->no_create)
 		return -ENOSYS;
 
+	forget_req = fuse_get_req(fc);
+	if (IS_ERR(forget_req))
+		return PTR_ERR(forget_req);
+
 	req = fuse_get_req(fc);
+	err = PTR_ERR(req);
 	if (IS_ERR(req))
-		return PTR_ERR(req);
+		goto out_put_forget_req;
 
 	err = -ENOMEM;
 	ff = fuse_file_alloc();
@@ -262,7 +281,6 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	inarg.mode = mode;
 	req->in.h.opcode = FUSE_CREATE;
 	req->in.h.nodeid = get_node_id(dir);
-	req->inode = dir;
 	req->in.numargs = 2;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -285,25 +303,23 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
 		goto out_free_ff;
 
+	fuse_put_request(fc, req);
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr);
-	err = -ENOMEM;
 	if (!inode) {
 		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
 		ff->fh = outopen.fh;
-		/* Special release, with inode = NULL, this will
-		   trigger a 'forget' request when the release is
-		   complete */
-		fuse_send_release(fc, ff, outentry.nodeid, NULL, flags, 0);
-		goto out_put_request;
+		fuse_sync_release(fc, ff, outentry.nodeid, flags);
+		fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
+		return -ENOMEM;
 	}
-	fuse_put_request(fc, req);
+	fuse_put_request(fc, forget_req);
 	d_instantiate(entry, inode);
 	fuse_change_timeout(entry, &outentry);
 	file = lookup_instantiate_filp(nd, entry, generic_file_open);
 	if (IS_ERR(file)) {
 		ff->fh = outopen.fh;
-		fuse_send_release(fc, ff, outentry.nodeid, inode, flags, 0);
+		fuse_sync_release(fc, ff, outentry.nodeid, flags);
 		return PTR_ERR(file);
 	}
 	fuse_finish_open(inode, file, ff, &outopen);
@@ -313,6 +329,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
 	fuse_file_free(ff);
  out_put_request:
 	fuse_put_request(fc, req);
+ out_put_forget_req:
+	fuse_put_request(fc, forget_req);
 	return err;
 }
 
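Read together, the reworked CREATE_OPEN failure handling after these hunks
becomes the following; the comment is added here for orientation:

	fuse_put_request(fc, req);
	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
			  &outentry.attr);
	if (!inode) {
		flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
		ff->fh = outopen.fh;
		/* tell userspace to release the opened file, then forget
		   the node we failed to instantiate an inode for */
		fuse_sync_release(fc, ff, outentry.nodeid, flags);
		fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
		return -ENOMEM;
	}
	fuse_put_request(fc, forget_req);
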
@@ -328,7 +346,6 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
 	int err;
 
 	req->in.h.nodeid = get_node_id(dir);
-	req->inode = dir;
 	req->out.numargs = 1;
 	req->out.args[0].size = sizeof(outarg);
 	req->out.args[0].value = &outarg;
@@ -448,7 +465,6 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
 
 	req->in.h.opcode = FUSE_UNLINK;
 	req->in.h.nodeid = get_node_id(dir);
-	req->inode = dir;
 	req->in.numargs = 1;
 	req->in.args[0].size = entry->d_name.len + 1;
 	req->in.args[0].value = entry->d_name.name;
@@ -480,7 +496,6 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
 
 	req->in.h.opcode = FUSE_RMDIR;
 	req->in.h.nodeid = get_node_id(dir);
-	req->inode = dir;
 	req->in.numargs = 1;
 	req->in.args[0].size = entry->d_name.len + 1;
 	req->in.args[0].value = entry->d_name.name;
@@ -510,8 +525,6 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
 	inarg.newdir = get_node_id(newdir);
 	req->in.h.opcode = FUSE_RENAME;
 	req->in.h.nodeid = get_node_id(olddir);
-	req->inode = olddir;
-	req->inode2 = newdir;
 	req->in.numargs = 3;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -558,7 +571,6 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
 	memset(&inarg, 0, sizeof(inarg));
 	inarg.oldnodeid = get_node_id(inode);
 	req->in.h.opcode = FUSE_LINK;
-	req->inode2 = inode;
 	req->in.numargs = 2;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -587,7 +599,6 @@ int fuse_do_getattr(struct inode *inode)
 
 	req->in.h.opcode = FUSE_GETATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->out.numargs = 1;
 	req->out.args[0].size = sizeof(arg);
 	req->out.args[0].value = &arg;
@@ -679,7 +690,6 @@ static int fuse_access(struct inode *inode, int mask)
 	inarg.mask = mask;
 	req->in.h.opcode = FUSE_ACCESS;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -820,7 +830,6 @@ static char *read_link(struct dentry *dentry)
 	}
 	req->in.h.opcode = FUSE_READLINK;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->out.argvar = 1;
 	req->out.numargs = 1;
 	req->out.args[0].size = PAGE_SIZE - 1;
@@ -939,7 +948,6 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
 	iattr_to_fattr(attr, &inarg);
 	req->in.h.opcode = FUSE_SETATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -1002,7 +1010,6 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
 	inarg.flags = flags;
 	req->in.h.opcode = FUSE_SETXATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 3;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -1041,7 +1048,6 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
 	inarg.size = size;
 	req->in.h.opcode = FUSE_GETXATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 2;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -1091,7 +1097,6 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
 	inarg.size = size;
 	req->in.h.opcode = FUSE_LISTXATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -1135,7 +1140,6 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
 
 	req->in.h.opcode = FUSE_REMOVEXATTR;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = strlen(name) + 1;
 	req->in.args[0].value = name;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 087f3b734f40..1d59af306b28 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -30,7 +30,6 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
 	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);
 	req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -113,37 +112,22 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
 	return err;
 }
 
-/* Special case for failed iget in CREATE */
-static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
+struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
+				   int opcode)
 {
-	/* If called from end_io_requests(), req has more than one
-	   reference and fuse_reset_request() cannot work */
-	if (fc->connected) {
-		u64 nodeid = req->in.h.nodeid;
-		fuse_reset_request(req);
-		fuse_send_forget(fc, req, nodeid, 1);
-	} else
-		fuse_put_request(fc, req);
-}
-
-void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
-		       u64 nodeid, struct inode *inode, int flags, int isdir)
-{
-	struct fuse_req * req = ff->release_req;
+	struct fuse_req *req = ff->release_req;
 	struct fuse_release_in *inarg = &req->misc.release_in;
 
 	inarg->fh = ff->fh;
 	inarg->flags = flags;
-	req->in.h.opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
+	req->in.h.opcode = opcode;
 	req->in.h.nodeid = nodeid;
-	req->inode = inode;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(struct fuse_release_in);
 	req->in.args[0].value = inarg;
-	request_send_background(fc, req);
-	if (!inode)
-		req->end = fuse_release_end;
 	kfree(ff);
+
+	return req;
 }
 
 int fuse_release_common(struct inode *inode, struct file *file, int isdir)
@@ -151,8 +135,15 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
 	struct fuse_file *ff = file->private_data;
 	if (ff) {
 		struct fuse_conn *fc = get_fuse_conn(inode);
-		u64 nodeid = get_node_id(inode);
-		fuse_send_release(fc, ff, nodeid, inode, file->f_flags, isdir);
+		struct fuse_req *req;
+
+		req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
+					isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
+
+		/* Hold vfsmount and dentry until release is finished */
+		req->vfsmount = mntget(file->f_vfsmnt);
+		req->dentry = dget(file->f_dentry);
+		request_send_background(fc, req);
 	}
 
 	/* Return value is ignored by VFS */
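
Putting the two hunks together, fuse_release_common() after the patch reads
roughly as below; the trailing return is the unchanged tail of the function,
reproduced here only to make the sketch complete:

int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_req *req;

		req = fuse_release_fill(ff, get_node_id(inode), file->f_flags,
					isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished */
		req->vfsmount = mntget(file->f_vfsmnt);
		req->dentry = dget(file->f_dentry);
		request_send_background(fc, req);
	}

	/* Return value is ignored by VFS */
	return 0;
}
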
@@ -192,8 +183,6 @@ static int fuse_flush(struct file *file, fl_owner_t id)
 	inarg.fh = ff->fh;
 	req->in.h.opcode = FUSE_FLUSH;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
-	req->file = file;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -232,8 +221,6 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
 	inarg.fsync_flags = datasync ? 1 : 0;
 	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
-	req->file = file;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(inarg);
 	req->in.args[0].value = &inarg;
@@ -266,8 +253,6 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
 	inarg->size = count;
 	req->in.h.opcode = opcode;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
-	req->file = file;
 	req->in.numargs = 1;
 	req->in.args[0].size = sizeof(struct fuse_read_in);
 	req->in.args[0].value = inarg;
@@ -342,6 +327,8 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
 	req->out.page_zeroing = 1;
 	fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
 	if (fc->async_read) {
+		get_file(file);
+		req->file = file;
 		req->end = fuse_readpages_end;
 		request_send_background(fc, req);
 	} else {
@@ -420,8 +407,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
 	inarg.size = count;
 	req->in.h.opcode = FUSE_WRITE;
 	req->in.h.nodeid = get_node_id(inode);
-	req->inode = inode;
-	req->file = file;
 	req->in.argpages = 1;
 	req->in.numargs = 2;
 	req->in.args[0].size = sizeof(struct fuse_write_in);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 0474202cb5dc..25f8581a770c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -8,12 +8,12 @@
 
 #include <linux/fuse.h>
 #include <linux/fs.h>
+#include <linux/mount.h>
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/backing-dev.h>
-#include <asm/semaphore.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -135,9 +135,6 @@ struct fuse_req {
 	    fuse_conn */
 	struct list_head list;
 
-	/** Entry on the background list */
-	struct list_head bg_entry;
-
 	/** refcount */
 	atomic_t count;
 
@@ -150,6 +147,9 @@ struct fuse_req {
 	/** True if the request has reply */
 	unsigned isreply:1;
 
+	/** Force sending of the request even if interrupted */
+	unsigned force:1;
+
 	/** The request was interrupted */
 	unsigned interrupted:1;
 
@@ -192,15 +192,15 @@ struct fuse_req {
 	/** offset of data on first page */
 	unsigned page_offset;
 
-	/** Inode used in the request */
-	struct inode *inode;
-
-	/** Second inode used in the request (or NULL) */
-	struct inode *inode2;
-
 	/** File used in the request (or NULL) */
 	struct file *file;
 
+	/** vfsmount used in release */
+	struct vfsmount *vfsmount;
+
+	/** dentry used in release */
+	struct dentry *dentry;
+
 	/** Request completion callback */
 	void (*end)(struct fuse_conn *, struct fuse_req *);
 };
@@ -243,10 +243,6 @@ struct fuse_conn {
 	/** The list of requests under I/O */
 	struct list_head io;
 
-	/** Requests put in the background (RELEASE or any other
-	    interrupted request) */
-	struct list_head background;
-
 	/** Number of requests currently in the background */
 	unsigned num_background;
 
@@ -258,15 +254,9 @@ struct fuse_conn {
 	/** waitq for blocked connection */
 	wait_queue_head_t blocked_waitq;
 
-	/** RW semaphore for exclusion with fuse_put_super() */
-	struct rw_semaphore sbput_sem;
-
 	/** The next unique request id */
 	u64 reqctr;
 
-	/** Mount is active */
-	unsigned mounted;
-
 	/** Connection established, cleared on umount, connection
 	    abort and device release */
 	unsigned connected;
@@ -383,12 +373,9 @@ void fuse_file_free(struct fuse_file *ff);
 void fuse_finish_open(struct inode *inode, struct file *file,
 		      struct fuse_file *ff, struct fuse_open_out *outarg);
 
-/**
- * Send a RELEASE request
- */
-void fuse_send_release(struct fuse_conn *fc, struct fuse_file *ff,
-		       u64 nodeid, struct inode *inode, int flags, int isdir);
-
+/** */
+struct fuse_req *fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags,
+				   int opcode);
 /**
  * Send RELEASE or RELEASEDIR request
  */
@@ -446,11 +433,6 @@ struct fuse_req *fuse_request_alloc(void);
 void fuse_request_free(struct fuse_req *req);
 
 /**
- * Reinitialize a request, the preallocated flag is left unmodified
- */
-void fuse_reset_request(struct fuse_req *req);
-
-/**
  * Reserve a preallocated request
 */
 struct fuse_req *fuse_get_req(struct fuse_conn *fc);
@@ -476,11 +458,6 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
 */
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
 
-/**
- * Release inodes and file associated with background request
- */
-void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req);
-
 /* Abort all requests */
 void fuse_abort_conn(struct fuse_conn *fc);
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index a13c0f529058..0225729977c4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -11,7 +11,6 @@
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/file.h>
-#include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -204,20 +203,14 @@ static void fuse_put_super(struct super_block *sb)
 {
 	struct fuse_conn *fc = get_fuse_conn_super(sb);
 
-	down_write(&fc->sbput_sem);
-	while (!list_empty(&fc->background))
-		fuse_release_background(fc,
-					list_entry(fc->background.next,
-						   struct fuse_req, bg_entry));
-
 	spin_lock(&fc->lock);
-	fc->mounted = 0;
 	fc->connected = 0;
+	fc->blocked = 0;
 	spin_unlock(&fc->lock);
-	up_write(&fc->sbput_sem);
 	/* Flush all readers on this fs */
 	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	wake_up_all(&fc->waitq);
+	wake_up_all(&fc->blocked_waitq);
 	kobject_del(&fc->kobj);
 	kobject_put(&fc->kobj);
 }
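
With the background list, sbput_sem and the mounted flag gone,
fuse_put_super() reduces to the following, assembled from the hunk above;
the second comment is added here:

static void fuse_put_super(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	spin_lock(&fc->lock);
	fc->connected = 0;
	fc->blocked = 0;
	spin_unlock(&fc->lock);
	/* Flush all readers on this fs */
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	/* wake both daemon readers and tasks blocked in fuse_get_req() */
	wake_up_all(&fc->waitq);
	wake_up_all(&fc->blocked_waitq);
	kobject_del(&fc->kobj);
	kobject_put(&fc->kobj);
}
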
@@ -386,8 +379,6 @@ static struct fuse_conn *new_conn(void)
 		INIT_LIST_HEAD(&fc->pending);
 		INIT_LIST_HEAD(&fc->processing);
 		INIT_LIST_HEAD(&fc->io);
-		INIT_LIST_HEAD(&fc->background);
-		init_rwsem(&fc->sbput_sem);
 		kobj_set_kset_s(fc, connections_subsys);
 		kobject_init(&fc->kobj);
 		atomic_set(&fc->num_waiting, 0);
@@ -543,7 +534,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 		goto err_kobject_del;
 
 	sb->s_root = root_dentry;
-	fc->mounted = 1;
 	fc->connected = 1;
 	kobject_get(&fc->kobj);
 	file->private_data = fc;