author    Chris Mason <chris.mason@oracle.com>  2008-08-20 13:39:41 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:06 -0400
commit    b64a2851ba25b3410a87d3d1b751155612105c8e (patch)
tree      5bad12d67cfdd6657acbe1091dd1c196e5e17818 /fs/btrfs/volumes.c
parent    4d1b5fb4d7075f862848dbff8873e22382abd482 (diff)
Btrfs: Wait for async bio submissions to make some progress at queue time
Before, the btrfs bdi congestion function was used to test for too many async bios. This keeps that check to throttle pdflush, but also adds a check while queueing bios.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
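The throttle works in two halves: schedule_bio() queues the bio and then sleeps for up to HZ/10 while nr_async_bios is at or above btrfs_async_submit_limit(), and run_scheduled_bios() wakes those sleepers once the count drops below two thirds of that limit. Below is a minimal userspace sketch of that producer/consumer throttle, assuming pthreads; the names pending_bios, async_limit, queue_one_bio() and complete_one_bio() are illustrative stand-ins, not btrfs symbols.

/* Sketch of the queue-time throttle pattern (not the kernel code):
 * submitters count pending work and sleep briefly once a limit is hit;
 * the worker wakes them when the backlog drops below ~2/3 of the limit. */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static unsigned long pending_bios;              /* plays the role of nr_async_bios */
static const unsigned long async_limit = 256;   /* plays the role of btrfs_async_submit_limit() */

/* Worker side: retire one unit of work and wake throttled submitters
 * once the backlog has fallen below two thirds of the limit. */
static void complete_one_bio(void)
{
        pthread_mutex_lock(&lock);
        pending_bios--;
        if (pending_bios < async_limit * 2 / 3)
                pthread_cond_broadcast(&drained);
        pthread_mutex_unlock(&lock);
}

/* Submit side: queue the work, then wait a bounded ~100ms (the analogue
 * of wait_event_timeout(..., HZ/10)) for the backlog to shrink. */
static void queue_one_bio(void)
{
        struct timespec deadline;

        pthread_mutex_lock(&lock);
        pending_bios++;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 100 * 1000 * 1000;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }
        while (pending_bios >= async_limit &&
               pthread_cond_timedwait(&drained, &lock, &deadline) == 0)
                ;       /* woken early: recheck; timed out: give up */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        queue_one_bio();
        complete_one_bio();
        return 0;
}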
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--   fs/btrfs/volumes.c   18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23a5b0aba00a..2652660e6079 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -138,12 +138,18 @@ int run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
+	struct btrfs_fs_info *fs_info;
 	struct bio *tail;
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run = 0;
+	unsigned long limit;
 
 	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+	fs_info = device->dev_root->fs_info;
+	limit = btrfs_async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
+
 loop:
 	spin_lock(&device->io_lock);
 
@@ -179,7 +185,11 @@ loop:
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
-		atomic_dec(&device->dev_root->fs_info->nr_async_bios);
+		atomic_dec(&fs_info->nr_async_bios);
+
+		if (atomic_read(&fs_info->nr_async_bios) < limit &&
+		    waitqueue_active(&fs_info->async_submit_wait))
+			wake_up(&fs_info->async_submit_wait);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 		bio_get(cur);
@@ -2135,6 +2145,7 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 		 int rw, struct bio *bio)
 {
 	int should_queue = 1;
+	unsigned long limit;
 
 	/* don't bother with additional async steps for reads, right now */
 	if (!(rw & (1 << BIO_RW))) {
@@ -2171,6 +2182,11 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 	if (should_queue)
 		btrfs_queue_worker(&root->fs_info->submit_workers,
 				   &device->work);
+
+	limit = btrfs_async_submit_limit(root->fs_info);
+	wait_event_timeout(root->fs_info->async_submit_wait,
+			(atomic_read(&root->fs_info->nr_async_bios) < limit),
+			HZ/10);
 	return 0;
 }
 