Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/async-thread.c    7
-rw-r--r--   fs/btrfs/ctree.h           1
-rw-r--r--   fs/btrfs/disk-io.c        45
3 files changed, 33 insertions, 20 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 5f2f5a8c2289..958cd8b5f0d7 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -48,6 +48,7 @@ struct btrfs_worker_thread {
 
 	/* number of things on the pending list */
 	atomic_t num_pending;
+	unsigned long sequence;
 
 	/* protects the pending list. */
 	spinlock_t lock;
@@ -197,6 +198,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 
 		spin_lock_irq(&workers->lock);
 		list_add_tail(&worker->worker_list, &workers->idle_list);
+		worker->idle = 1;
 		workers->num_workers++;
 		spin_unlock_irq(&workers->lock);
 	}
@@ -238,7 +240,10 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	list_move_tail(next, &workers->worker_list);
+	atomic_inc(&worker->num_pending);
+	worker->sequence++;
+	if (worker->sequence % 4 == 0)
+		list_move_tail(next, &workers->worker_list);
 	return worker;
 }
 
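The next_worker() change above bumps the chosen worker's num_pending as soon as it is picked and only rotates the worker list on every fourth pick, so consecutive submissions batch onto the same worker instead of round-robining one item at a time. Below is a minimal user-space sketch of that 1-in-4 rotation; the names, the fixed-size ring, and the array-based "list" are illustrative assumptions, not the kernel code.

/*
 * Illustrative user-space sketch (not the kernel code): a ring of workers
 * where each pick bumps the chosen worker's pending count, and the head of
 * the ring only advances every fourth assignment, so consecutive items
 * batch onto one worker before moving on.
 */
#include <stdio.h>

#define NUM_WORKERS 3

struct toy_worker {
	int pending;
	unsigned long sequence;
};

static struct toy_worker workers[NUM_WORKERS];
static int head;	/* index of the worker at the front of the "list" */

static struct toy_worker *toy_next_worker(void)
{
	struct toy_worker *worker = &workers[head];

	worker->pending++;
	worker->sequence++;
	/* rotate to the next worker only every fourth assignment */
	if (worker->sequence % 4 == 0)
		head = (head + 1) % NUM_WORKERS;
	return worker;
}

int main(void)
{
	for (int i = 0; i < 12; i++) {
		struct toy_worker *w = toy_next_worker();
		printf("item %2d -> worker %ld (pending %d)\n",
		       i, (long)(w - workers), w->pending);
	}
	return 0;
}
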
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index bb4a8d2200d0..040213359393 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -526,6 +526,7 @@ struct btrfs_fs_info {
 	struct btrfs_transaction *running_transaction;
 	wait_queue_head_t transaction_throttle;
 	wait_queue_head_t transaction_wait;
+	wait_queue_head_t async_submit_wait;
 	struct btrfs_super_block super_copy;
 	struct btrfs_super_block super_for_commit;
 	struct block_device *__bdev;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1bf210dadef6..1aed1f4616b6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -429,31 +429,36 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 	return 0;
 }
 
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
+static unsigned long async_submit_limit(struct btrfs_fs_info *info)
 {
-	int limit = 256 * info->fs_devices->open_devices;
-
-	if (iodone)
-		limit = (limit * 3) / 2;
-	if (atomic_read(&info->nr_async_submits) > limit)
-		return 1;
+	unsigned long limit = min_t(unsigned long,
+				    info->workers.max_workers,
+				    info->fs_devices->open_devices);
+	return 256 * limit;
+}
 
-	return atomic_read(&info->nr_async_bios) > limit;
+int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
+{
+	return atomic_read(&info->nr_async_bios) > async_submit_limit(info);
 }
 
 static void run_one_async_submit(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info;
 	struct async_submit_bio *async;
+	int limit;
 
 	async = container_of(work, struct async_submit_bio, work);
 	fs_info = BTRFS_I(async->inode)->root->fs_info;
+
+	limit = async_submit_limit(fs_info);
+	limit = limit * 2 / 3;
+
 	atomic_dec(&fs_info->nr_async_submits);
 
-	if ((async->bio->bi_rw & (1 << BIO_RW)) &&
-	    !btrfs_congested_async(fs_info, 1)) {
-		clear_bdi_congested(&fs_info->bdi, WRITE);
-	}
+	if (atomic_read(&fs_info->nr_async_submits) < limit)
+		wake_up(&fs_info->async_submit_wait);
+
 	async->submit_bio_hook(async->inode, async->rw, async->bio,
 			       async->mirror_num);
 	kfree(async);
@@ -464,6 +469,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 			extent_submit_bio_hook_t *submit_bio_hook)
 {
 	struct async_submit_bio *async;
+	int limit = async_submit_limit(fs_info);
 
 	async = kmalloc(sizeof(*async), GFP_NOFS);
 	if (!async)
@@ -478,6 +484,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 	async->work.flags = 0;
 	atomic_inc(&fs_info->nr_async_submits);
 	btrfs_queue_worker(&fs_info->workers, &async->work);
+
+	wait_event_timeout(fs_info->async_submit_wait,
+			   (atomic_read(&fs_info->nr_async_submits) < limit),
+			   HZ/10);
 	return 0;
 }
 
@@ -545,16 +555,11 @@ static int btree_writepages(struct address_space *mapping,
 	if (wbc->sync_mode == WB_SYNC_NONE) {
 		u64 num_dirty;
 		u64 start = 0;
-		unsigned long thresh = 96 * 1024 * 1024;
+		unsigned long thresh = 8 * 1024 * 1024;
 
 		if (wbc->for_kupdate)
 			return 0;
 
-		if (current_is_pdflush()) {
-			thresh = 96 * 1024 * 1024;
-		} else {
-			thresh = 8 * 1024 * 1024;
-		}
 		num_dirty = count_range_bits(tree, &start, (u64)-1,
 					     thresh, EXTENT_DIRTY);
 		if (num_dirty < thresh) {
@@ -1333,6 +1338,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	mutex_init(&fs_info->volume_mutex);
 	init_waitqueue_head(&fs_info->transaction_throttle);
 	init_waitqueue_head(&fs_info->transaction_wait);
+	init_waitqueue_head(&fs_info->async_submit_wait);
 
 #if 0
 	ret = add_hasher(fs_info, "crc32c");
@@ -1380,6 +1386,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	 * devices
 	 */
 	fs_info->submit_workers.idle_thresh = 64;
+	fs_info->workers.idle_thresh = 32;
 
 	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
 	btrfs_init_workers(&fs_info->endio_workers, "endio",
@@ -1849,7 +1856,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	struct extent_io_tree *tree;
 	u64 num_dirty;
 	u64 start = 0;
-	unsigned long thresh = 2 * 1024 * 1024;
+	unsigned long thresh = 12 * 1024 * 1024;
 	tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
 	if (current_is_pdflush())
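
Taken together, the disk-io.c hunks throttle async submitters: btrfs_wq_submit_bio() queues the work and then sleeps briefly on async_submit_wait whenever nr_async_submits reaches async_submit_limit(), and run_one_async_submit() wakes the waiters once the backlog drains below two thirds of that limit. Below is a rough user-space analogue of that pattern; pthreads and a condition variable stand in for the kernel wait queue, and all names and the fixed limit are illustrative assumptions rather than the kernel code.

/*
 * Minimal user-space analogue of the throttle added above (assumption:
 * a pthread condition variable stands in for the kernel wait queue).
 * Producers stop queueing once the in-flight count reaches a limit and
 * wait, with a timeout, until the worker drains it back below roughly
 * two thirds of that limit and signals them.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t async_submit_wait = PTHREAD_COND_INITIALIZER;
static int nr_async_submits;
static const int limit = 256;

/* producer side, mirroring btrfs_wq_submit_bio() */
void submit_one(void)
{
	pthread_mutex_lock(&lock);
	nr_async_submits++;
	/* ... hand the work item to the worker pool here ... */
	if (nr_async_submits >= limit) {
		struct timespec ts;

		/* wait at most ~100ms, roughly the HZ/10 timeout above */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_nsec += 100 * 1000 * 1000;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		pthread_cond_timedwait(&async_submit_wait, &lock, &ts);
	}
	pthread_mutex_unlock(&lock);
}

/* worker side, mirroring run_one_async_submit() */
void complete_one(void)
{
	pthread_mutex_lock(&lock);
	nr_async_submits--;
	if (nr_async_submits < limit * 2 / 3)
		pthread_cond_broadcast(&async_submit_wait);
	pthread_mutex_unlock(&lock);
}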