about summary refs log tree commit diff stats
path: root/fs/btrfs/inode.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-12-17 14:51:42 -0500
committerChris Mason <chris.mason@oracle.com>2008-12-17 14:51:42 -0500
commitcad321ad529400c6ab24c501a67c3be720a0744c (patch)
tree7ac2b81a914329b7ac78b4f2a4275d62484ef45e /fs/btrfs/inode.c
parent87b29b208c6c38f3446d2de6ece946e2459052cf (diff)
Btrfs: shift all end_io work to thread pools
bio_end_io for reads without checksumming on and btree writes were happening without using async thread pools. This means the extent_io.c code had to use spin_lock_irq and friends on the rb tree locks for extent state. There were some irq safe vs unsafe lock inversions between the delallock lock and the extent state locks. This patch gets rid of them by moving all end_io code into the thread pools. To avoid contention and deadlocks between the data end_io processing and the metadata end_io processing yet another thread pool is added to finish off metadata writes. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--fs/btrfs/inode.c12
1 files changed, 6 insertions, 6 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 0577e77e661d..068bad463387 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1282,8 +1282,8 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1282} 1282}
1283 1283
1284/* 1284/*
1285 * extent_io.c submission hook. This does the right thing for csum calculation on write, 1285 * extent_io.c submission hook. This does the right thing for csum calculation
1286 * or reading the csums from the tree before a read 1286 * on write, or reading the csums from the tree before a read
1287 */ 1287 */
1288static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 1288static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1289 int mirror_num, unsigned long bio_flags) 1289 int mirror_num, unsigned long bio_flags)
@@ -1292,11 +1292,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1292 int ret = 0; 1292 int ret = 0;
1293 int skip_sum; 1293 int skip_sum;
1294 1294
1295 skip_sum = btrfs_test_flag(inode, NODATASUM);
1296
1295 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1297 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1296 BUG_ON(ret); 1298 BUG_ON(ret);
1297 1299
1298 skip_sum = btrfs_test_flag(inode, NODATASUM);
1299
1300 if (!(rw & (1 << BIO_RW))) { 1300 if (!(rw & (1 << BIO_RW))) {
1301 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1301 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1302 return btrfs_submit_compressed_read(inode, bio, 1302 return btrfs_submit_compressed_read(inode, bio,
@@ -1648,13 +1648,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
1648 failrec->logical, failrec->len); 1648 failrec->logical, failrec->len);
1649 failrec->last_mirror++; 1649 failrec->last_mirror++;
1650 if (!state) { 1650 if (!state) {
1651 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock); 1651 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1652 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, 1652 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1653 failrec->start, 1653 failrec->start,
1654 EXTENT_LOCKED); 1654 EXTENT_LOCKED);
1655 if (state && state->start != failrec->start) 1655 if (state && state->start != failrec->start)
1656 state = NULL; 1656 state = NULL;
1657 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock); 1657 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1658 } 1658 }
1659 if (!state || failrec->last_mirror > num_copies) { 1659 if (!state || failrec->last_mirror > num_copies) {
1660 set_state_private(failure_tree, failrec->start, 0); 1660 set_state_private(failure_tree, failrec->start, 0);