aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/md/bcache/request.c
diff options
context:
space:
mode:
authorNicholas Swenson <nks@daterainc.com>2014-01-09 19:03:04 -0500
committerKent Overstreet <kmo@daterainc.com>2014-03-18 15:22:33 -0400
commitda415a096fc06e49d1a15f7a06bcfe6ad44c5d38 (patch)
tree54a5b937d4acd579f43b9dc062afd41ecaa33d82 /drivers/md/bcache/request.c
parent90db6919f5f1614d1b7a92052445506bc6c564d2 (diff)
bcache: Fix moving_gc deadlocking with a foreground write
Deadlock happened because a foreground write slept, waiting for a bucket to be allocated. Normally the gc would mark buckets available for invalidation. But the moving_gc was stuck waiting for outstanding writes to complete. These writes used the bcache_wq, the same queue foreground writes used. This fix gives moving_gc its own work queue, so it can still finish moving even if foreground writes are stuck waiting for allocation. It also makes the work queue a parameter to the data_insert path, so moving_gc can use its workqueue for writes. Signed-off-by: Nicholas Swenson <nks@daterainc.com> Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--drivers/md/bcache/request.c13
1 file changed, 7 insertions, 6 deletions
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fc14ba3f6d05..3e880869871f 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -248,7 +248,7 @@ static void bch_data_insert_keys(struct closure *cl)
248 atomic_dec_bug(journal_ref); 248 atomic_dec_bug(journal_ref);
249 249
250 if (!op->insert_data_done) 250 if (!op->insert_data_done)
251 continue_at(cl, bch_data_insert_start, bcache_wq); 251 continue_at(cl, bch_data_insert_start, op->wq);
252 252
253 bch_keylist_free(&op->insert_keys); 253 bch_keylist_free(&op->insert_keys);
254 closure_return(cl); 254 closure_return(cl);
@@ -297,7 +297,7 @@ static void bch_data_invalidate(struct closure *cl)
297 op->insert_data_done = true; 297 op->insert_data_done = true;
298 bio_put(bio); 298 bio_put(bio);
299out: 299out:
300 continue_at(cl, bch_data_insert_keys, bcache_wq); 300 continue_at(cl, bch_data_insert_keys, op->wq);
301} 301}
302 302
303static void bch_data_insert_error(struct closure *cl) 303static void bch_data_insert_error(struct closure *cl)
@@ -340,7 +340,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
340 if (op->writeback) 340 if (op->writeback)
341 op->error = error; 341 op->error = error;
342 else if (!op->replace) 342 else if (!op->replace)
343 set_closure_fn(cl, bch_data_insert_error, bcache_wq); 343 set_closure_fn(cl, bch_data_insert_error, op->wq);
344 else 344 else
345 set_closure_fn(cl, NULL, NULL); 345 set_closure_fn(cl, NULL, NULL);
346 } 346 }
@@ -376,7 +376,7 @@ static void bch_data_insert_start(struct closure *cl)
376 if (bch_keylist_realloc(&op->insert_keys, 376 if (bch_keylist_realloc(&op->insert_keys,
377 3 + (op->csum ? 1 : 0), 377 3 + (op->csum ? 1 : 0),
378 op->c)) 378 op->c))
379 continue_at(cl, bch_data_insert_keys, bcache_wq); 379 continue_at(cl, bch_data_insert_keys, op->wq);
380 380
381 k = op->insert_keys.top; 381 k = op->insert_keys.top;
382 bkey_init(k); 382 bkey_init(k);
@@ -413,7 +413,7 @@ static void bch_data_insert_start(struct closure *cl)
413 } while (n != bio); 413 } while (n != bio);
414 414
415 op->insert_data_done = true; 415 op->insert_data_done = true;
416 continue_at(cl, bch_data_insert_keys, bcache_wq); 416 continue_at(cl, bch_data_insert_keys, op->wq);
417err: 417err:
418 /* bch_alloc_sectors() blocks if s->writeback = true */ 418 /* bch_alloc_sectors() blocks if s->writeback = true */
419 BUG_ON(op->writeback); 419 BUG_ON(op->writeback);
@@ -442,7 +442,7 @@ err:
442 bio_put(bio); 442 bio_put(bio);
443 443
444 if (!bch_keylist_empty(&op->insert_keys)) 444 if (!bch_keylist_empty(&op->insert_keys))
445 continue_at(cl, bch_data_insert_keys, bcache_wq); 445 continue_at(cl, bch_data_insert_keys, op->wq);
446 else 446 else
447 closure_return(cl); 447 closure_return(cl);
448 } 448 }
@@ -824,6 +824,7 @@ static inline struct search *search_alloc(struct bio *bio,
824 s->iop.error = 0; 824 s->iop.error = 0;
825 s->iop.flags = 0; 825 s->iop.flags = 0;
826 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; 826 s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
827 s->iop.wq = bcache_wq;
827 828
828 return s; 829 return s;
829} 830}