author     Chris Mason <chris.mason@oracle.com>  2008-08-20 13:39:41 -0400
committer  Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:06 -0400
commit     b64a2851ba25b3410a87d3d1b751155612105c8e (patch)
tree       5bad12d67cfdd6657acbe1091dd1c196e5e17818
parent     4d1b5fb4d7075f862848dbff8873e22382abd482 (diff)
Btrfs: Wait for async bio submissions to make some progress at queue time
Before, the btrfs bdi congestion function was used to test for too many
async bios.  This keeps that check to throttle pdflush, but also adds a
check while queuing bios.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
-rw-r--r--  fs/btrfs/disk-io.c      | 16
-rw-r--r--  fs/btrfs/disk-io.h      |  1
-rw-r--r--  fs/btrfs/transaction.c  |  2
-rw-r--r--  fs/btrfs/volumes.c      | 18
4 files changed, 27 insertions, 10 deletions
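As a reading aid, here is a minimal, self-contained sketch of the queue-time throttling pattern this patch applies; it is not the btrfs code itself, and the names demo_queue_bio(), demo_complete_bio() and the fixed limit of 256 are made up for illustration. The submitter sleeps briefly when too many async bios are already in flight, and the completion path wakes it once the count falls back under a lower watermark.

    /* sketch only: assumed names, not part of the btrfs patch below */
    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static atomic_t nr_async_bios = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(async_submit_wait);

    static unsigned long demo_limit(void)
    {
            return 256;     /* stand-in for btrfs_async_submit_limit() */
    }

    /* submission side: queue the bio, then throttle if the backlog is deep */
    static void demo_queue_bio(void)
    {
            unsigned long limit = demo_limit();

            atomic_inc(&nr_async_bios);
            /* ...hand the bio off to the async worker here... */

            /* wait up to HZ/10 for the backlog to drain below the limit */
            wait_event_timeout(async_submit_wait,
                               atomic_read(&nr_async_bios) < limit,
                               HZ / 10);
    }

    /* completion side: drop the count and wake throttled submitters */
    static void demo_complete_bio(void)
    {
            unsigned long limit = demo_limit() * 2 / 3;

            atomic_dec(&nr_async_bios);
            /* only touch the wait queue when someone is actually sleeping */
            if (atomic_read(&nr_async_bios) < limit &&
                waitqueue_active(&async_submit_wait))
                    wake_up(&async_submit_wait);
    }

The waitqueue_active() test mirrors the wake-up paths added below: it avoids wait-queue overhead on every completion when nobody is sleeping, and waking at two thirds of the limit keeps submitters from being woken only to block again immediately.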
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 92e14dd9bddb..bbba14b629d2 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -429,7 +429,7 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 	return 0;
 }
 
-static unsigned long async_submit_limit(struct btrfs_fs_info *info)
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 {
 	unsigned long limit = min_t(unsigned long,
 				    info->workers.max_workers,
@@ -439,7 +439,8 @@ static unsigned long async_submit_limit(struct btrfs_fs_info *info)
 
 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
 {
-	return atomic_read(&info->nr_async_bios) > async_submit_limit(info);
+	return atomic_read(&info->nr_async_bios) >
+		btrfs_async_submit_limit(info);
 }
 
 static void run_one_async_submit(struct btrfs_work *work)
@@ -451,12 +452,13 @@ static void run_one_async_submit(struct btrfs_work *work)
 	async = container_of(work, struct async_submit_bio, work);
 	fs_info = BTRFS_I(async->inode)->root->fs_info;
 
-	limit = async_submit_limit(fs_info);
+	limit = btrfs_async_submit_limit(fs_info);
 	limit = limit * 2 / 3;
 
 	atomic_dec(&fs_info->nr_async_submits);
 
-	if (atomic_read(&fs_info->nr_async_submits) < limit)
+	if (atomic_read(&fs_info->nr_async_submits) < limit &&
+	    waitqueue_active(&fs_info->async_submit_wait))
 		wake_up(&fs_info->async_submit_wait);
 
 	async->submit_bio_hook(async->inode, async->rw, async->bio,
@@ -469,7 +471,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			extent_submit_bio_hook_t *submit_bio_hook)
 {
 	struct async_submit_bio *async;
-	int limit = async_submit_limit(fs_info);
+	int limit = btrfs_async_submit_limit(fs_info);
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
@@ -1863,10 +1865,10 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	struct extent_io_tree *tree;
 	u64 num_dirty;
 	u64 start = 0;
-	unsigned long thresh = 12 * 1024 * 1024;
+	unsigned long thresh = 96 * 1024 * 1024;
 	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
-	if (current_is_pdflush())
+	if (current_is_pdflush() || current->flags & PF_MEMALLOC)
 		return;
 
 	num_dirty = count_range_bits(tree, &start, (u64)-1,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index e904a69347a4..2562a273ae18 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -73,4 +73,5 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			int rw, struct bio *bio, int mirror_num,
 			extent_submit_bio_hook_t *submit_bio_hook);
 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
+unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 #endif
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 6bcb0876f9bb..eff3ad72991b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -322,8 +322,6 @@ int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
 		if (ret)
 			break;
 		while(start <= end) {
-			if (btrfs_congested_async(root->fs_info, 0))
-				congestion_wait(WRITE, HZ/10);
 			cond_resched();
 
 			index = start >> PAGE_CACHE_SHIFT;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 23a5b0aba00a..2652660e6079 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -138,12 +138,18 @@ int run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
+	struct btrfs_fs_info *fs_info;
 	struct bio *tail;
 	struct bio *cur;
 	int again = 0;
 	unsigned long num_run = 0;
+	unsigned long limit;
 
 	bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+	fs_info = device->dev_root->fs_info;
+	limit = btrfs_async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
+
 loop:
 	spin_lock(&device->io_lock);
 
@@ -179,7 +185,11 @@ loop:
 		cur = pending;
 		pending = pending->bi_next;
 		cur->bi_next = NULL;
-		atomic_dec(&device->dev_root->fs_info->nr_async_bios);
+		atomic_dec(&fs_info->nr_async_bios);
+
+		if (atomic_read(&fs_info->nr_async_bios) < limit &&
+		    waitqueue_active(&fs_info->async_submit_wait))
+			wake_up(&fs_info->async_submit_wait);
 
 		BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 		bio_get(cur);
@@ -2135,6 +2145,7 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 			int rw, struct bio *bio)
 {
 	int should_queue = 1;
+	unsigned long limit;
 
 	/* don't bother with additional async steps for reads, right now */
 	if (!(rw & (1 << BIO_RW))) {
@@ -2171,6 +2182,11 @@ int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
 	if (should_queue)
 		btrfs_queue_worker(&root->fs_info->submit_workers,
 				   &device->work);
+
+	limit = btrfs_async_submit_limit(root->fs_info);
+	wait_event_timeout(root->fs_info->async_submit_wait,
+			   (atomic_read(&root->fs_info->nr_async_bios) < limit),
+			   HZ/10);
 	return 0;
 }
 