author	Chris Mason <chris.mason@oracle.com>	2008-05-15 16:15:45 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:03 -0400
commit	cb03c743c6486e4efb8be6bdf7bf23177826d156 (patch)
tree	e5e9cfb8258c55946b0a551b8f89deb7a2b0267a /fs/btrfs/disk-io.c
parent	211c17f51f46dc6c308c742098273dd46b5ca59c (diff)
Btrfs: Change the congestion functions to meter the number of async submits as well
The async submit workqueue was absorbing too many requests, leading to long stalls while the async submitters worked through the backlog. Count the queued async submits, and have the congestion functions report write congestion once the count passes a per-device limit, so callers throttle before the queue grows too deep.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
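As an illustration of the pattern this patch applies, here is a minimal user-space sketch: a shared atomic counter is incremented when async work is queued, decremented when the worker dequeues it, and a congestion probe reports backpressure once the counter exceeds 256 entries per open device (the limit used in the patch). The names (submit_meter, meter_queue, meter_dequeue, meter_congested) are hypothetical, and C11 atomics stand in for the kernel's atomic_t helpers.

/* Sketch of the async-submit metering pattern; not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct submit_meter {
	atomic_int nr_async_submits;	/* queued but not yet submitted */
	int open_devices;		/* scales the congestion threshold */
};

/* Where the patch calls atomic_inc() after list_add_tail(). */
static void meter_queue(struct submit_meter *m)
{
	atomic_fetch_add(&m->nr_async_submits, 1);
}

/* Where the patch calls atomic_dec() after list_del(). */
static void meter_dequeue(struct submit_meter *m)
{
	atomic_fetch_sub(&m->nr_async_submits, 1);
}

/* Mirrors the early return added to btrfs_congested_fn(). */
static int meter_congested(const struct submit_meter *m)
{
	int limit = 256 * m->open_devices;
	return atomic_load(&m->nr_async_submits) > limit;
}

int main(void)
{
	struct submit_meter m = { .open_devices = 2 };	/* limit = 512 */
	atomic_init(&m.nr_async_submits, 0);

	for (int i = 0; i < 600; i++)
		meter_queue(&m);
	printf("congested: %d\n", meter_congested(&m));	/* 600 > 512 -> 1 */

	for (int i = 0; i < 200; i++)
		meter_dequeue(&m);
	printf("congested: %d\n", meter_congested(&m));	/* 400 <= 512 -> 0 */
	return 0;
}

With two open devices the threshold works out to 512 queued submits, so the probe trips at 600 and clears again once the backlog drains to 400.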
Diffstat (limited to 'fs/btrfs/disk-io.c')
 fs/btrfs/disk-io.c | 9 +++++++++
 1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 264f297260f8..373374340e9b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -453,6 +453,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	spin_lock(&fs_info->async_submit_work_lock);
 	list_add_tail(&async->list, &fs_info->async_submit_work_list);
+	atomic_inc(&fs_info->nr_async_submits);
 	spin_unlock(&fs_info->async_submit_work_lock);
 
 	queue_work(async_submit_workqueue, &fs_info->async_submit_work);
@@ -906,10 +907,16 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
 	int ret = 0;
+	int limit = 256 * info->fs_devices->open_devices;
 	struct list_head *cur;
 	struct btrfs_device *device;
 	struct backing_dev_info *bdi;
 
+	if ((bdi_bits & (1 << BDI_write_congested)) &&
+	    atomic_read(&info->nr_async_submits) > limit) {
+		return 1;
+	}
+
 	list_for_each(cur, &info->fs_devices->devices) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
 		if (!device->bdev)
@@ -1117,6 +1124,7 @@ static void btrfs_async_submit_work(struct work_struct *work)
 		}
 		next = fs_info->async_submit_work_list.next;
 		list_del(next);
+		atomic_dec(&fs_info->nr_async_submits);
 		spin_unlock(&fs_info->async_submit_work_lock);
 
 		async = list_entry(next, struct async_submit_bio, list);
@@ -1179,6 +1187,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
+	atomic_set(&fs_info->nr_async_submits, 0);
 	fs_info->sb = sb;
 	fs_info->max_extent = (u64)-1;
 	fs_info->max_inline = 8192 * 1024;