author     Chris Mason <chris.mason@oracle.com>  2008-12-17 14:51:42 -0500
committer  Chris Mason <chris.mason@oracle.com>  2008-12-17 14:51:42 -0500
commit     cad321ad529400c6ab24c501a67c3be720a0744c (patch)
tree       7ac2b81a914329b7ac78b4f2a4275d62484ef45e /fs/btrfs/disk-io.c
parent     87b29b208c6c38f3446d2de6ece946e2459052cf (diff)
Btrfs: shift all end_io work to thread pools
bio_end_io for reads with checksumming disabled and for btree writes was happening without using the async thread pools. This meant the extent_io.c code had to use spin_lock_irq and friends on the rb tree locks for extent state.

There were some irq-safe vs unsafe lock inversions between the delalloc lock and the extent state locks. This patch gets rid of them by moving all end_io code into the thread pools.

To avoid contention and deadlocks between data end_io processing and metadata end_io processing, yet another thread pool is added to finish off metadata writes.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
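To make the new routing concrete, here is a minimal user-space sketch (not the kernel code itself) of the four-way dispatch that end_workqueue_bio() performs after this patch: every completed bio is handed to one of four worker pools based on two flags, so no end_io work runs in interrupt context. The enum, the pool_name table and the pick_pool() helper are hypothetical illustration only.

/*
 * Toy model of the completion routing added by this patch.
 * Not btrfs code; pick_pool() mirrors the branch structure of
 * end_workqueue_bio() after the change.
 */
#include <stdio.h>
#include <stdbool.h>

enum pool { ENDIO, ENDIO_META, ENDIO_WRITE, ENDIO_META_WRITE };

static const char *pool_name[] = {
        "endio", "endio-meta", "endio-write", "endio-meta-write"
};

static enum pool pick_pool(bool is_write, bool is_metadata)
{
        if (is_write)
                return is_metadata ? ENDIO_META_WRITE : ENDIO_WRITE;
        return is_metadata ? ENDIO_META : ENDIO;
}

int main(void)
{
        for (int w = 0; w < 2; w++)
                for (int m = 0; m < 2; m++)
                        printf("write=%d metadata=%d -> %s\n",
                               w, m, pool_name[pick_pool(w, m)]);
        return 0;
}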
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c  39  ++++++++++++++++++++++++++-------------
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 541a8279ac71..04f8d7080b17 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -447,8 +447,12 @@ static void end_workqueue_bio(struct bio *bio, int err)
         end_io_wq->work.flags = 0;
 
         if (bio->bi_rw & (1 << BIO_RW)) {
-                btrfs_queue_worker(&fs_info->endio_write_workers,
-                                   &end_io_wq->work);
+                if (end_io_wq->metadata)
+                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
+                                           &end_io_wq->work);
+                else
+                        btrfs_queue_worker(&fs_info->endio_write_workers,
+                                           &end_io_wq->work);
         } else {
                 if (end_io_wq->metadata)
                         btrfs_queue_worker(&fs_info->endio_meta_workers,
@@ -624,23 +628,24 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                  int mirror_num, unsigned long bio_flags)
 {
-        /*
-         * kthread helpers are used to submit writes so that checksumming
-         * can happen in parallel across all CPUs
-         */
+        int ret;
+
+        ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
+                                  bio, 1);
+        BUG_ON(ret);
+
         if (!(rw & (1 << BIO_RW))) {
-                int ret;
                 /*
                  * called for a read, do the setup so that checksum validation
                  * can happen in the async kernel threads
                  */
-                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
-                                          bio, 1);
-                BUG_ON(ret);
-
                 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                      mirror_num, 0);
         }
+        /*
+         * kthread helpers are used to submit writes so that checksumming
+         * can happen in parallel across all CPUs
+         */
         return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                    inode, rw, bio, mirror_num, 0,
                                    __btree_submit_bio_start,
@@ -1350,12 +1355,13 @@ static void end_workqueue_fn(struct btrfs_work *work)
         bio = end_io_wq->bio;
         fs_info = end_io_wq->info;
 
-        /* metadata bios are special because the whole tree block must
+        /* metadata bio reads are special because the whole tree block must
          * be checksummed at once.  This makes sure the entire block is in
          * ram and up to date before trying to verify things.  For
          * blocksize <= pagesize, it is basically a noop
          */
-        if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
+        if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+            !bio_ready_for_csum(bio)) {
                 btrfs_queue_worker(&fs_info->endio_meta_workers,
                                    &end_io_wq->work);
                 return;
@@ -1668,6 +1674,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                            fs_info->thread_pool_size);
         btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
                            fs_info->thread_pool_size);
+        btrfs_init_workers(&fs_info->endio_meta_write_workers,
+                           "endio-meta-write", fs_info->thread_pool_size);
         btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                            fs_info->thread_pool_size);
 
@@ -1677,6 +1685,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
          */
         fs_info->endio_workers.idle_thresh = 4;
         fs_info->endio_write_workers.idle_thresh = 64;
+        fs_info->endio_meta_write_workers.idle_thresh = 64;
 
         btrfs_start_workers(&fs_info->workers, 1);
         btrfs_start_workers(&fs_info->submit_workers, 1);
@@ -1685,6 +1694,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
         btrfs_start_workers(&fs_info->endio_meta_workers,
                             fs_info->thread_pool_size);
+        btrfs_start_workers(&fs_info->endio_meta_write_workers,
+                            fs_info->thread_pool_size);
         btrfs_start_workers(&fs_info->endio_write_workers,
                             fs_info->thread_pool_size);
 
@@ -1866,6 +1877,7 @@ fail_sb_buffer:
         btrfs_stop_workers(&fs_info->workers);
         btrfs_stop_workers(&fs_info->endio_workers);
         btrfs_stop_workers(&fs_info->endio_meta_workers);
+        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
         btrfs_stop_workers(&fs_info->endio_write_workers);
         btrfs_stop_workers(&fs_info->submit_workers);
 fail_iput:
@@ -2253,6 +2265,7 @@ int close_ctree(struct btrfs_root *root)
         btrfs_stop_workers(&fs_info->workers);
         btrfs_stop_workers(&fs_info->endio_workers);
         btrfs_stop_workers(&fs_info->endio_meta_workers);
+        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
         btrfs_stop_workers(&fs_info->endio_write_workers);
         btrfs_stop_workers(&fs_info->submit_workers);
 
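For completeness, a small user-space sketch of the requeue-until-ready pattern that end_workqueue_fn() keeps for metadata reads after this patch: if the whole tree block is not yet in RAM, the work item is pushed back onto the endio-meta pool instead of verifying a partial block. The struct work, ready_for_csum() and the page counters below are hypothetical stand-ins, not the btrfs API.

/*
 * Toy model of the metadata-read requeue check in end_workqueue_fn().
 * Not kernel code; the data structures are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

struct work {
        bool is_write;
        bool is_metadata;
        int pages_uptodate;
        int pages_total;
};

static bool ready_for_csum(const struct work *w)
{
        return w->pages_uptodate == w->pages_total;
}

/* returns true if the work finished, false if it must be requeued */
static bool end_workqueue_fn(struct work *w)
{
        if (!w->is_write && w->is_metadata && !ready_for_csum(w)) {
                printf("requeue: only %d/%d pages up to date\n",
                       w->pages_uptodate, w->pages_total);
                return false;
        }
        printf("finish: verify checksum and complete the bio\n");
        return true;
}

int main(void)
{
        struct work w = { .is_write = false, .is_metadata = true,
                          .pages_uptodate = 1, .pages_total = 2 };

        while (!end_workqueue_fn(&w))
                w.pages_uptodate++;     /* another page completed meanwhile */
        return 0;
}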