author     Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 13:12:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-05-07 13:12:32 -0400
commit     a26ea93a3d19c2b79e8b382356014eba607ce477
tree       9a37d34ff60121c78123bc8bd4aab2b8b841e36b  /fs/fuse/dev.c
parent     c818c778b0384e5d9e8184ec43b73e05a7ced86f
parent     60b9df7a54804a965850db00beec4d3a2c002536
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
Pull fuse updates from Miklos Szeredi:
"This contains two patchsets from Maxim Patlasov.
The first reworks the request throttling so that only async requests
are throttled. Wakeup of waiting async requests is also optimized.
The second series adds support for async processing of direct IO which
optimizes direct IO and enables the use of the AIO userspace
interface."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
fuse: add flag to turn on async direct IO
fuse: truncate file if async dio failed
fuse: optimize short direct reads
fuse: enable asynchronous processing direct IO
fuse: make fuse_direct_io() aware about AIO
fuse: add support of async IO
fuse: move fuse_release_user_pages() up
fuse: optimize wake_up
fuse: implement exclusive wakeup for blocked_waitq
fuse: skip blocking on allocations of synchronous requests
fuse: add flag fc->initialized
fuse: make request allocations for background processing explicit
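The key change of the first series is visible in the __fuse_get_req() hunk below: allocation sleeps only while fuse_block_alloc() holds, waiters queue exclusively, and request completion wakes one waiter at a time instead of the whole queue. A minimal sketch of that generic kernel pattern, independent of FUSE; the demo_* names are illustrative only, not part of the patchset.

/* Sketch of the exclusive-wakeup pattern applied to blocked_waitq. */
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_blocked = true;

/* Queue as an exclusive waiter; returns -ERESTARTSYS on signal, 0 otherwise */
static int demo_wait_for_slot(void)
{
	return wait_event_interruptible_exclusive(demo_waitq, !demo_blocked);
}

/* On completion, wake just the next exclusive waiter instead of wake_up_all() */
static void demo_slot_released(void)
{
	demo_blocked = false;
	if (waitqueue_active(&demo_waitq))
		wake_up(&demo_waitq);
}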
Diffstat (limited to 'fs/fuse/dev.c')
-rw-r--r--  fs/fuse/dev.c | 80
1 file changed, 63 insertions(+), 17 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 9bfd1a3214e6..a6c1664e330b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -111,7 +111,7 @@ static void restore_sigs(sigset_t *oldset)
 	sigprocmask(SIG_SETMASK, oldset, NULL);
 }
 
-static void __fuse_get_request(struct fuse_req *req)
+void __fuse_get_request(struct fuse_req *req)
 {
 	atomic_inc(&req->count);
 }
@@ -130,20 +130,30 @@ static void fuse_req_init_context(struct fuse_req *req)
 	req->in.h.pid = current->pid;
 }
 
-struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
+{
+	return !fc->initialized || (for_background && fc->blocked);
+}
+
+static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
+				       bool for_background)
 {
 	struct fuse_req *req;
-	sigset_t oldset;
-	int intr;
 	int err;
-
 	atomic_inc(&fc->num_waiting);
-	block_sigs(&oldset);
-	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
-	restore_sigs(&oldset);
-	err = -EINTR;
-	if (intr)
-		goto out;
+
+	if (fuse_block_alloc(fc, for_background)) {
+		sigset_t oldset;
+		int intr;
+
+		block_sigs(&oldset);
+		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
+				!fuse_block_alloc(fc, for_background));
+		restore_sigs(&oldset);
+		err = -EINTR;
+		if (intr)
+			goto out;
+	}
 
 	err = -ENOTCONN;
 	if (!fc->connected)
@@ -151,19 +161,35 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
 
 	req = fuse_request_alloc(npages);
 	err = -ENOMEM;
-	if (!req)
+	if (!req) {
+		if (for_background)
+			wake_up(&fc->blocked_waitq);
 		goto out;
+	}
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = for_background;
 	return req;
 
  out:
 	atomic_dec(&fc->num_waiting);
 	return ERR_PTR(err);
 }
+
+struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
+{
+	return __fuse_get_req(fc, npages, false);
+}
 EXPORT_SYMBOL_GPL(fuse_get_req);
 
+struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
+					     unsigned npages)
+{
+	return __fuse_get_req(fc, npages, true);
+}
+EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
+
 /*
  * Return request in fuse_file->reserved_req. However that may
  * currently be in use. If that is the case, wait for it to become
@@ -225,19 +251,31 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 	struct fuse_req *req;
 
 	atomic_inc(&fc->num_waiting);
-	wait_event(fc->blocked_waitq, !fc->blocked);
+	wait_event(fc->blocked_waitq, fc->initialized);
 	req = fuse_request_alloc(0);
 	if (!req)
 		req = get_reserved_req(fc, file);
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
+	req->background = 0;
 	return req;
 }
 
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
+		if (unlikely(req->background)) {
+			/*
+			 * We get here in the unlikely case that a background
+			 * request was allocated but not sent
+			 */
+			spin_lock(&fc->lock);
+			if (!fc->blocked)
+				wake_up(&fc->blocked_waitq);
+			spin_unlock(&fc->lock);
+		}
+
 		if (req->waiting)
 			atomic_dec(&fc->num_waiting);
 
@@ -335,10 +373,15 @@ __releases(fc->lock)
 	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
-		if (fc->num_background == fc->max_background) {
+		req->background = 0;
+
+		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
-			wake_up_all(&fc->blocked_waitq);
-		}
+
+		/* Wake up next waiter, if any */
+		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
+			wake_up(&fc->blocked_waitq);
+
 		if (fc->num_background == fc->congestion_threshold &&
 		    fc->connected && fc->bdi_initialized) {
 			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
@@ -442,6 +485,7 @@ __acquires(fc->lock)
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
 {
+	BUG_ON(req->background);
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		req->out.h.error = -ENOTCONN;
@@ -469,7 +513,7 @@ EXPORT_SYMBOL_GPL(fuse_request_send);
 static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
 					    struct fuse_req *req)
 {
-	req->background = 1;
+	BUG_ON(!req->background);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -2071,6 +2115,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	if (fc->connected) {
 		fc->connected = 0;
 		fc->blocked = 0;
+		fc->initialized = 1;
 		end_io_requests(fc);
 		end_queued_requests(fc);
 		end_polls(fc);
@@ -2089,6 +2134,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
 	spin_lock(&fc->lock);
 	fc->connected = 0;
 	fc->blocked = 0;
+	fc->initialized = 1;
 	end_queued_requests(fc);
 	end_polls(fc);
 	wake_up_all(&fc->blocked_waitq);
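
With the allocation split above, callers choose up front: fuse_get_req() for synchronous requests, which block only until fc->initialized, and fuse_get_req_for_background() for async ones, which are additionally throttled while fc->blocked. A hedged caller sketch follows; example_queue_background() is hypothetical, while fuse_request_send_background() is the existing submission path whose locked helper now asserts req->background.

/* Hypothetical call site for the new background allocator. */
static int example_queue_background(struct fuse_conn *fc)
{
	/* May sleep until fc->initialized, and while fc->blocked */
	struct fuse_req *req = fuse_get_req_for_background(fc, 0);

	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... fill in req->in.h and argument pages here ... */

	/* req->background was already set by __fuse_get_req() */
	fuse_request_send_background(fc, req);
	return 0;
}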