author     Chris Mason <chris.mason@oracle.com>  2008-11-13 09:59:36 -0500
committer  Chris Mason <chris.mason@oracle.com>  2008-11-13 09:59:36 -0500
commit     6f3577bdc768e6dae3c4d419e89b5a904f470728 (patch)
tree       69d0df75fe55f7c4e9b2f1a1651478e37a3d2920
parent     5b050f04c8ce911c5b6831305a24d70eab95e732 (diff)
Btrfs: Improve metadata read latencies
This fixes latency problems on metadata reads by making sure they don't
go through the async submit queue, and by tuning down the amount of
readahead done during btree searches.

Also, the btrfs bdi congestion function is tuned to ignore the number of
pending async bios and checksums.  There is additional code that
throttles new async bios now, so the congestion function doesn't need to
worry about it anymore.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
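As a rough illustration of the first change (a simplified sketch, not the exact
kernel code: btrfs_map_bio()'s last argument is the async_submit flag, and the
__btree_submit_bio_start/__btree_submit_bio_done callbacks are assumed from the
disk-io.c code of this period), metadata reads are now mapped and submitted
inline, while metadata writes still go through the checksumming worker threads:

/*
 * Simplified sketch of btree_submit_bio_hook() after this patch.
 * Reads skip the async submit queue entirely; writes are still queued
 * so the worker threads can checksum them before submission.
 */
static int btree_submit_sketch(struct inode *inode, int rw, struct bio *bio,
			       int mirror_num, unsigned long bio_flags)
{
	if (!(rw & (1 << BIO_RW))) {
		/* read: map and submit right here, async_submit == 0 */
		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
				     mirror_num, 0);
	}
	/* write: hand off to the async helpers (checksum, then map) */
	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info, inode,
				   rw, bio, mirror_num, 0,
				   __btree_submit_bio_start,
				   __btree_submit_bio_done);
}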
-rw-r--r--  fs/btrfs/ctree.c    8
-rw-r--r--  fs/btrfs/disk-io.c  6
2 files changed, 7 insertions, 7 deletions
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index f82f8db02275..ac61c50a3311 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1285,16 +1285,16 @@ static noinline void reada_for_search(struct btrfs_root *root,
 		}
 		search = btrfs_node_blockptr(node, nr);
 		if ((search >= lowest_read && search <= highest_read) ||
-		    (search < lowest_read && lowest_read - search <= 32768) ||
-		    (search > highest_read && search - highest_read <= 32768)) {
+		    (search < lowest_read && lowest_read - search <= 16384) ||
+		    (search > highest_read && search - highest_read <= 16384)) {
 			readahead_tree_block(root, search, blocksize,
 				      btrfs_node_ptr_generation(node, nr));
 			nread += blocksize;
 		}
 		nscan++;
-		if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
+		if (path->reada < 2 && (nread > (64 * 1024) || nscan > 32))
 			break;
-		if(nread > (1024 * 1024) || nscan > 128)
+		if(nread > (256 * 1024) || nscan > 128)
 			break;
 
 		if (search < lowest_read)
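Restating the readahead tuning above as a sketch (the helper name and structure
are illustrative, the constants are the ones this patch installs): only blocks
within 16K of the lowest/highest block read so far are prefetched, and unless
the search asked for aggressive readahead (path->reada >= 2) it stops after 64K
of readahead or 32 scanned slots; even then it stops at 256K or 128 slots.

/* Illustrative only: the window check used by reada_for_search(). */
static int within_reada_window(u64 search, u64 lowest_read, u64 highest_read)
{
	if (search >= lowest_read && search <= highest_read)
		return 1;	/* inside the range already read */
	if (search < lowest_read && lowest_read - search <= 16384)
		return 1;	/* just below it (was 32768) */
	if (search > highest_read && search - highest_read <= 16384)
		return 1;	/* just above it (was 32768) */
	return 0;
}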
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1bb54d69fbb2..3b0e974a9e9c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -605,7 +605,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 		BUG_ON(ret);
 
 		return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
-				     mirror_num, 1);
+				     mirror_num, 0);
 	}
 	return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
 				   inode, rw, bio, mirror_num, 0,
@@ -1139,11 +1139,11 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 	struct list_head *cur;
 	struct btrfs_device *device;
 	struct backing_dev_info *bdi;
-
+#if 0
 	if ((bdi_bits & (1 << BDI_write_congested)) &&
 	    btrfs_congested_async(info, 0))
 		return 1;
-
+#endif
 	list_for_each(cur, &info->fs_devices->devices) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
 		if (!device->bdev)
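With the async check compiled out, the congestion callback ends up reporting
only real device congestion.  Roughly (a sketch of the remaining behaviour,
assuming the blk_get_backing_dev_info()/bdi_congested() helpers of this era;
the loop body continues past the context shown above):

/*
 * Sketch of what btrfs_congested_fn() effectively does after this patch:
 * pending async bios and checksums no longer count as congestion, only
 * the per-device backing_dev_info bits do.
 */
static int btrfs_congested_sketch(struct btrfs_fs_info *info, int bdi_bits)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (!device->bdev)
			continue;
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits))
			return 1;
	}
	return 0;
}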