author     Chris Mason <chris.mason@oracle.com>    2008-08-15 15:34:15 -0400
committer  Chris Mason <chris.mason@oracle.com>    2008-09-25 11:04:06 -0400
commit     0986fe9eac24fd186927c3b87af51d62f8ab92cd (patch)
tree       766cc26439f0f89fea5f45975cd2c595551e7e08
parent     b720d209524fccdd33a499351f2ca880b8c0ffb4 (diff)
Btrfs: Count async bios separately from async checksum work items
Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--  fs/btrfs/ctree.h   |  1
-rw-r--r--  fs/btrfs/disk-io.c | 25
-rw-r--r--  fs/btrfs/volumes.c |  6
3 files changed, 26 insertions(+), 6 deletions(-)
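
[Editor's note] The patch below replaces the single nr_async_submits congestion test with a helper, congested_async(), that checks two independent counters against per-device limits: queued checksum work items against 256 * open_devices, and queued async bios against 8192 * open_devices, with both limits relaxed by 3/2 when the check runs from the I/O-completion path. A rough stand-alone sketch of that arithmetic follows; it is plain userspace C with invented stand-in names, not the kernel structs or atomic_t API.

/* Illustrative sketch only: "sketch_fs_info" and its fields stand in for
 * fs_info->fs_devices->open_devices, nr_async_submits and nr_async_bios. */
#include <stdio.h>

struct sketch_fs_info {
	int open_devices;       /* open devices backing the filesystem */
	long nr_async_submits;  /* queued async checksum work items */
	long nr_async_bios;     /* queued async write bios */
};

/* Mirrors the shape of congested_async(): each counter has its own
 * per-device limit, and both limits grow by 3/2 when the check runs
 * from the I/O-done path (iodone != 0). */
static int congested_async_sketch(const struct sketch_fs_info *info, int iodone)
{
	long limit = 256L * info->open_devices;

	if (iodone)
		limit = (limit * 3) / 2;
	if (info->nr_async_submits > limit)
		return 1;

	limit = 8192L * info->open_devices;
	if (iodone)
		limit = (limit * 3) / 2;
	return info->nr_async_bios > limit;
}

int main(void)
{
	struct sketch_fs_info info = {
		.open_devices = 2,
		.nr_async_submits = 100,
		.nr_async_bios = 20000,
	};

	/* 20000 bios exceed 8192 * 2 = 16384, so congestion is reported. */
	printf("congested = %d\n", congested_async_sketch(&info, 0));
	return 0;
}

With two open devices, for example, write congestion is reported once more than 512 checksum items or more than 16384 async bios are queued.
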
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c88f1e16ce2d..bb4a8d2200d0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -544,6 +544,7 @@ struct btrfs_fs_info {
 	struct list_head hashers;
 	struct list_head dead_roots;
 	atomic_t nr_async_submits;
+	atomic_t nr_async_bios;
 
 	/*
 	 * this is used by the balancing code to wait for all the pending
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 99bd9f9b9eed..9902d29abd06 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -429,6 +429,21 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 	return 0;
 }
 
+static int congested_async(struct btrfs_fs_info *info, int iodone)
+{
+	int limit = 256 * info->fs_devices->open_devices;
+
+	if (iodone)
+		limit = (limit * 3) / 2;
+	if (atomic_read(&info->nr_async_submits) > limit)
+		return 1;
+
+	limit = 8192 * info->fs_devices->open_devices;
+	if (iodone)
+		limit = (limit * 3) / 2;
+	return atomic_read(&info->nr_async_bios) > limit;
+}
+
 static void run_one_async_submit(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info;
@@ -437,6 +452,11 @@ static void run_one_async_submit(struct btrfs_work *work)
 	async = container_of(work, struct async_submit_bio, work);
 	fs_info = BTRFS_I(async->inode)->root->fs_info;
 	atomic_dec(&fs_info->nr_async_submits);
+
+	if ((async->bio->bi_rw & (1 << BIO_RW)) &&
+	    !congested_async(fs_info, 1)) {
+		clear_bdi_congested(&fs_info->bdi, WRITE);
+	}
 	async->submit_bio_hook(async->inode, async->rw, async->bio,
 			       async->mirror_num);
 	kfree(async);
@@ -938,15 +958,13 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
 	int ret = 0;
-	int limit = 256 * info->fs_devices->open_devices;
 	struct list_head *cur;
 	struct btrfs_device *device;
 	struct backing_dev_info *bdi;
 
 	if ((bdi_bits & (1 << BDI_write_congested)) &&
-	    atomic_read(&info->nr_async_submits) > limit) {
+	    congested_async(info, 0))
 		return 1;
-	}
 
 	list_for_each(cur, &info->fs_devices->devices) {
 		device = list_entry(cur, struct btrfs_device, dev_list);
@@ -1250,6 +1268,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->space_info);
 	btrfs_mapping_init(&fs_info->mapping_tree);
 	atomic_set(&fs_info->nr_async_submits, 0);
+	atomic_set(&fs_info->nr_async_bios, 0);
 	atomic_set(&fs_info->throttles, 0);
 	atomic_set(&fs_info->throttle_gen, 0);
 	fs_info->sb = sb;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 09311b3066df..23a5b0aba00a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -179,7 +179,7 @@ loop:
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
-		atomic_dec(&device->dev_root->fs_info->nr_async_submits);
+		atomic_dec(&device->dev_root->fs_info->nr_async_bios);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 		bio_get(cur);
@@ -2145,12 +2145,12 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 	}
 
 	/*
-	 * nr_async_sumbits allows us to reliably return congestion to the
+	 * nr_async_bios allows us to reliably return congestion to the
 	 * higher layers.  Otherwise, the async bio makes it appear we have
 	 * made progress against dirty pages when we've really just put it
 	 * on a queue for later
 	 */
-	atomic_inc(&root->fs_info->nr_async_submits);
+	atomic_inc(&root->fs_info->nr_async_bios);
 	WARN_ON(bio->bi_next);
 	bio->bi_next = NULL;
 	bio->bi_rw |= rw;
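
[Editor's note] The comment in schedule_bio() above is the heart of the change: parking a bio on the async queue is not progress, so nr_async_bios is incremented when the bio is queued and only decremented when the device loop in volumes.c actually issues it. A stand-alone illustration of that inc/dec pairing follows, using C11 atomics and invented names (queue_bio, issue_bio, write_congested) rather than the kernel's atomic_t API.

/* Stand-alone illustration (not kernel code): the queueing side counts a
 * bio as pending and the worker side uncounts it only once the bio is
 * really issued, so a congestion check in between sees the truth. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_async_bios;    /* bios queued but not yet issued */
static const long BIO_LIMIT = 16384; /* e.g. 8192 * 2 open devices */

static void queue_bio(void)
{
	/* like schedule_bio(): queueing is not progress, count it as pending */
	atomic_fetch_add(&nr_async_bios, 1);
}

static void issue_bio(void)
{
	/* like run_scheduled_bios(): the bio really reached the device now */
	atomic_fetch_sub(&nr_async_bios, 1);
}

static int write_congested(void)
{
	return atomic_load(&nr_async_bios) > BIO_LIMIT;
}

int main(void)
{
	for (int i = 0; i < 20000; i++)
		queue_bio();
	printf("congested after queueing: %d\n", write_congested());

	for (int i = 0; i < 20000; i++)
		issue_bio();
	printf("congested after draining: %d\n", write_congested());
	return 0;
}
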