author    Maxim Patlasov <mpatlasov@parallels.com>  2013-03-21 10:02:36 -0400
committer Miklos Szeredi <mszeredi@suse.cz>         2013-04-17 06:31:45 -0400
commit    722d2bea8c601d0744e4a37170533fdf6214a678 (patch)
tree      3762894f78f98474c73e1a8120da2dd1598c3673 /fs/fuse
parent    0aada88476a33690c9569b094191ce92a38e6541 (diff)
fuse: implement exclusive wakeup for blocked_waitq
The patch solves the thundering herd problem. Since previous patches ensured
that only allocations for background requests may block, it is safe to wake
up a single waiter: whoever it is, it will wake up the next one from
request_end() afterwards.

Signed-off-by: Maxim Patlasov <mpatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
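To see why exclusive wakeup avoids the thundering herd, here is a minimal
userspace sketch of the same wake-one pattern (not part of the commit; the
names slots, slot_available and worker are invented for illustration):

/* Build with: gcc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define NWORKERS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_available = PTHREAD_COND_INITIALIZER;
static int slots = 2;	/* stands in for the background request budget */

static void *worker(void *arg)
{
	long id = (long)arg;

	/* Like wait_event_interruptible_exclusive(): block until a slot
	 * frees up; each signal is consumed by exactly one waiter. */
	pthread_mutex_lock(&lock);
	while (slots == 0)
		pthread_cond_wait(&slot_available, &lock);
	slots--;
	pthread_mutex_unlock(&lock);

	printf("worker %ld got a slot\n", id);

	/* Like request_end(): release the slot and wake exactly one
	 * waiter, which will wake the next one when it is done. */
	pthread_mutex_lock(&lock);
	slots++;
	pthread_cond_signal(&slot_available);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	long i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Replacing pthread_cond_signal() with pthread_cond_broadcast() reproduces the
pre-patch behaviour: every blocked worker wakes up to compete for the freed
slot and all but one immediately go back to sleep.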
Diffstat (limited to 'fs/fuse')
-rw-r--r--	fs/fuse/dev.c	29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index d692b85115bd..367310588962 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -147,7 +147,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 	int intr;
 
 	block_sigs(&oldset);
-	intr = wait_event_interruptible(fc->blocked_waitq,
+	intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
 				!fuse_block_alloc(fc, for_background));
 	restore_sigs(&oldset);
 	err = -EINTR;
@@ -161,8 +161,11 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
 
 	req = fuse_request_alloc(npages);
 	err = -ENOMEM;
-	if (!req)
+	if (!req) {
+		if (for_background)
+			wake_up(&fc->blocked_waitq);
 		goto out;
+	}
 
 	fuse_req_init_context(req);
 	req->waiting = 1;
@@ -262,6 +265,17 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
+		if (unlikely(req->background)) {
+			/*
+			 * We get here in the unlikely case that a background
+			 * request was allocated but not sent
+			 */
+			spin_lock(&fc->lock);
+			if (!fc->blocked)
+				wake_up(&fc->blocked_waitq);
+			spin_unlock(&fc->lock);
+		}
+
 		if (req->waiting)
 			atomic_dec(&fc->num_waiting);
 
@@ -359,10 +373,15 @@ __releases(fc->lock)
 	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
-		if (fc->num_background == fc->max_background) {
+		req->background = 0;
+
+		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
-			wake_up_all(&fc->blocked_waitq);
-		}
+
+		/* Wake up next waiter, if any */
+		if (!fc->blocked)
+			wake_up(&fc->blocked_waitq);
+
 		if (fc->num_background == fc->congestion_threshold &&
 		    fc->connected && fc->bdi_initialized) {
 			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
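Taken together (a reading note, not part of the commit): with exclusive
waits, every wakeup a waiter consumes must eventually be handed on, or later
waiters sleep forever. The hunks above cover each exit path: request_end()
wakes the next waiter once a background request finishes, __fuse_get_req()
re-issues the wakeup it consumed if fuse_request_alloc() then fails, and
fuse_put_request() does the same for a background request that was allocated
but never sent. Clearing req->background in request_end() keeps the
subsequent fuse_put_request() on the completion path from waking a second
waiter for the same slot.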