Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--	fs/fuse/dev.c	157
1 file changed, 44 insertions(+), 113 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 104a62dadb94..fec4779e2b55 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -64,18 +64,6 @@ static void restore_sigs(sigset_t *oldset)
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
-/*
- * Reset request, so that it can be reused
- *
- * The caller must be _very_ careful to make sure, that it is holding
- * the only reference to req
- */
-void fuse_reset_request(struct fuse_req *req)
-{
-	BUG_ON(atomic_read(&req->count) != 1);
-	fuse_request_init(req);
-}
-
 static void __fuse_get_request(struct fuse_req *req)
 {
 	atomic_inc(&req->count);
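With fuse_reset_request() gone, requests are no longer reset for reuse by a caller holding the sole reference; what remains is plain get/put reference counting (__fuse_get_request() above is the get half). A minimal userspace sketch of that get/put shape, using C11 atomics; obj, obj_get and obj_put are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int count;	/* creator holds the first reference */
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->count, 1);	/* analogous to atomic_inc(&req->count) */
}

static void obj_put(struct obj *o)
{
	/* analogous to atomic_dec_and_test(): free on the last put */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		free(o);
}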
@@ -103,6 +91,10 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
 	if (intr)
 		goto out;
 
+	err = -ENOTCONN;
+	if (!fc->connected)
+		goto out;
+
 	req = fuse_request_alloc();
 	err = -ENOMEM;
 	if (!req)
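The new -ENOTCONN test follows the function's existing convention: assign err before each check, so every goto out already carries the right code. A standalone sketch of the idiom (userspace C with invented names; the real fuse_get_req() hands the code back via ERR_PTR()):

#include <errno.h>
#include <stdlib.h>

static void *get_req(int connected, int *errp)
{
	void *req = NULL;
	int err;

	err = -ENOTCONN;	/* set the code before the test... */
	if (!connected)
		goto out;	/* ...so this exit already returns it */

	err = -ENOMEM;
	req = malloc(64);
	if (!req)
		goto out;

	err = 0;
out:
	*errp = err;
	return req;
}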
@@ -129,113 +121,38 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 }
 
 /*
- * Called with sbput_sem held for read (request_end) or write
- * (fuse_put_super). By the time fuse_put_super() is finished, all
- * inodes belonging to background requests must be released, so the
- * iputs have to be done within the locked region.
- */
-void fuse_release_background(struct fuse_conn *fc, struct fuse_req *req)
-{
-	iput(req->inode);
-	iput(req->inode2);
-	spin_lock(&fc->lock);
-	list_del(&req->bg_entry);
-	if (fc->num_background == FUSE_MAX_BACKGROUND) {
-		fc->blocked = 0;
-		wake_up_all(&fc->blocked_waitq);
-	}
-	fc->num_background--;
-	spin_unlock(&fc->lock);
-}
-
-/*
  * This function is called when a request is finished. Either a reply
  * has arrived or it was interrupted (and not yet sent) or some error
  * occurred during communication with userspace, or the device file
- * was closed. In case of a background request the reference to the
- * stored objects are released. The requester thread is woken up (if
- * still waiting), the 'end' callback is called if given, else the
- * reference to the request is released
- *
- * Releasing extra reference for foreground requests must be done
- * within the same locked region as setting state to finished. This
- * is because fuse_reset_request() may be called after request is
- * finished and it must be the sole possessor. If request is
- * interrupted and put in the background, it will return with an error
- * and hence never be reset and reused.
+ * was closed. The requester thread is woken up (if still waiting),
+ * the 'end' callback is called if given, else the reference to the
+ * request is released
  *
  * Called with fc->lock, unlocks it
  */
 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
+	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
+	req->end = NULL;
 	list_del(&req->list);
 	req->state = FUSE_REQ_FINISHED;
-	if (!req->background) {
-		spin_unlock(&fc->lock);
-		wake_up(&req->waitq);
-		fuse_put_request(fc, req);
-	} else {
-		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
-		req->end = NULL;
-		spin_unlock(&fc->lock);
-		down_read(&fc->sbput_sem);
-		if (fc->mounted)
-			fuse_release_background(fc, req);
-		up_read(&fc->sbput_sem);
-
-		/* fput must go outside sbput_sem, otherwise it can deadlock */
-		if (req->file)
-			fput(req->file);
-
-		if (end)
-			end(fc, req);
-		else
-			fuse_put_request(fc, req);
+	if (req->background) {
+		if (fc->num_background == FUSE_MAX_BACKGROUND) {
+			fc->blocked = 0;
+			wake_up_all(&fc->blocked_waitq);
+		}
+		fc->num_background--;
 	}
+	spin_unlock(&fc->lock);
+	dput(req->dentry);
+	mntput(req->vfsmount);
+	if (req->file)
+		fput(req->file);
+	wake_up(&req->waitq);
+	if (end)
+		end(fc, req);
+	else
+		fuse_put_request(fc, req);
 }
 
-/*
- * Unfortunately request interruption not just solves the deadlock
- * problem, it causes problems too. These stem from the fact, that an
- * interrupted request is continued to be processed in userspace,
- * while all the locks and object references (inode and file) held
- * during the operation are released.
- *
- * To release the locks is exactly why there's a need to interrupt the
- * request, so there's not a lot that can be done about this, except
- * introduce additional locking in userspace.
- *
- * More important is to keep inode and file references until userspace
- * has replied, otherwise FORGET and RELEASE could be sent while the
- * inode/file is still used by the filesystem.
- *
- * For this reason the concept of "background" request is introduced.
- * An interrupted request is backgrounded if it has been already sent
- * to userspace. Backgrounding involves getting an extra reference to
- * inode(s) or file used in the request, and adding the request to
- * fc->background list. When a reply is received for a background
- * request, the object references are released, and the request is
- * removed from the list. If the filesystem is unmounted while there
- * are still background requests, the list is walked and references
- * are released as if a reply was received.
- *
- * There's one more use for a background request. The RELEASE message is
- * always sent as background, since it doesn't return an error or
- * data.
- */
-static void background_request(struct fuse_conn *fc, struct fuse_req *req)
-{
-	req->background = 1;
-	list_add(&req->bg_entry, &fc->background);
-	fc->num_background++;
-	if (fc->num_background == FUSE_MAX_BACKGROUND)
-		fc->blocked = 1;
-	if (req->inode)
-		req->inode = igrab(req->inode);
-	if (req->inode2)
-		req->inode2 = igrab(req->inode2);
-	if (req->file)
-		get_file(req->file);
-}
-
 /* Called with fc->lock held. Releases, and then reacquires it. */
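The rewritten request_end() snapshots req->end into a local and clears it while fc->lock is still held, then invokes the callback only after unlocking, so it runs at most once and is free to block. A userspace sketch of that pattern, with a pthread mutex standing in for the spinlock and invented names:

#include <pthread.h>
#include <stddef.h>

struct conn;
struct req {
	void (*end)(struct conn *, struct req *);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void finish(struct conn *c, struct req *r)
{
	void (*end)(struct conn *, struct req *);

	pthread_mutex_lock(&lock);
	end = r->end;		/* snapshot under the lock... */
	r->end = NULL;		/* ...so a second finisher sees NULL and skips */
	/* ... state updates that need the lock ... */
	pthread_mutex_unlock(&lock);

	if (end)
		end(c, r);	/* unlocked: the callback may sleep */
}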
@@ -244,9 +161,14 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 	sigset_t oldset;
 
 	spin_unlock(&fc->lock);
-	block_sigs(&oldset);
-	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
-	restore_sigs(&oldset);
+	if (req->force)
+		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+	else {
+		block_sigs(&oldset);
+		wait_event_interruptible(req->waitq,
+					 req->state == FUSE_REQ_FINISHED);
+		restore_sigs(&oldset);
+	}
 	spin_lock(&fc->lock);
 	if (req->state == FUSE_REQ_FINISHED && !req->interrupted)
 		return;
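wait_event() is the uninterruptible variant: a forced request must wait for the reply no matter what signals arrive, while the normal path keeps the interruptible wait wrapped in block_sigs()/restore_sigs(). A userspace analog of the wait/wake pair, with a pthread condition variable standing in for req->waitq (illustrative names):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool finished;

static void wait_for_finish(void)	/* like wait_event() */
{
	pthread_mutex_lock(&m);
	while (!finished)		/* re-test guards against spurious wakeups */
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
}

static void mark_finished(void)		/* like setting the state, then wake_up() */
{
	pthread_mutex_lock(&m);
	finished = true;
	pthread_cond_broadcast(&cv);
	pthread_mutex_unlock(&m);
}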
@@ -268,8 +190,11 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 	if (req->state == FUSE_REQ_PENDING) {
 		list_del(&req->list);
 		__fuse_put_request(req);
-	} else if (req->state == FUSE_REQ_SENT)
-		background_request(fc, req);
+	} else if (req->state == FUSE_REQ_SENT) {
+		spin_unlock(&fc->lock);
+		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
+		spin_lock(&fc->lock);
+	}
 }
 
 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
@@ -327,8 +252,12 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
 {
 	spin_lock(&fc->lock);
-	background_request(fc, req);
 	if (fc->connected) {
+		req->background = 1;
+		fc->num_background++;
+		if (fc->num_background == FUSE_MAX_BACKGROUND)
+			fc->blocked = 1;
+
 		queue_request(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
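With background_request() removed, the accounting is inlined at its two call sites: incremented here on submission, decremented in request_end() on completion, with fc->blocked acting as a gate at the cap. The pattern reduced to a sketch (fields assumed to be protected by the connection lock; MAX_BACKGROUND and the names are illustrative):

#define MAX_BACKGROUND 10

struct conn_state {
	int num_background;	/* in-flight background requests */
	int blocked;		/* new submitters must wait while set */
};

static void account_background(struct conn_state *cs)
{
	cs->num_background++;
	if (cs->num_background == MAX_BACKGROUND)
		cs->blocked = 1;	/* hit the cap: close the gate */
}

static void unaccount_background(struct conn_state *cs)
{
	if (cs->num_background == MAX_BACKGROUND)
		cs->blocked = 0;	/* dropping below the cap: reopen */
	cs->num_background--;
}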
@@ -883,10 +812,12 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	spin_lock(&fc->lock);
 	if (fc->connected) {
 		fc->connected = 0;
+		fc->blocked = 0;
 		end_io_requests(fc);
 		end_requests(fc, &fc->pending);
 		end_requests(fc, &fc->processing);
 		wake_up_all(&fc->waitq);
+		wake_up_all(&fc->blocked_waitq);
 		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 	}
 	spin_unlock(&fc->lock);
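Clearing fc->blocked and waking blocked_waitq on abort pairs with the new -ENOTCONN check in fuse_get_req(): throttled callers wake up, retest fc->connected, and fail cleanly instead of sleeping forever. A userspace analog of that teardown (pthreads; struct conn and the names are invented, and the struct is assumed to be initialized elsewhere):

#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t lock;
	pthread_cond_t waitq;		/* device readers */
	pthread_cond_t blocked_waitq;	/* throttled submitters */
	bool connected;
	bool blocked;
};

static void abort_conn(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->connected) {
		c->connected = false;
		c->blocked = false;
		/* ... fail all pending and in-flight requests here ... */
		pthread_cond_broadcast(&c->waitq);
		pthread_cond_broadcast(&c->blocked_waitq);
	}
	pthread_mutex_unlock(&c->lock);
}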