author		Chris Mason <chris.mason@oracle.com>	2008-07-31 15:42:53 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit		61b4944018449003ac5f9757f4d125dce519cf51 (patch)
tree		553855996c641a945344db870b6dfd0d2d02086e /fs/btrfs/disk-io.c
parent		37d1aeee3990385e9bb436c50c2f7e120a668df6 (diff)
Btrfs: Fix streaming read performance with checksumming on
Large streaming reads make for large bios, which means each entry on the async work queues represents a large amount of data. IO congestion throttling on the device was kicking in before the async worker threads decided a single thread was busy and needed some help. The end result was that a streaming read would leave a single CPU running at 100% instead of balancing the work off to other CPUs.

This patch also changes the pre-IO checksum lookup done by reads to work on a per-bio basis instead of per-page. Per-page lookups meant many extra btree searches on large streaming reads; doing the checksum lookup right before bio submit allows us to reuse searches while processing adjacent offsets.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
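The per-bio checksum change lands in other files, not in the disk-io.c hunk below, so here is a minimal user-space sketch of the idea it describes. All names (csum_leaf_lookup, LEAF_SPAN, the page/leaf geometry) are hypothetical stand-ins, not the kernel code: the point is only that a search can be cached and reused while a bio's offsets stay inside the same checksum leaf.

/*
 * Hypothetical sketch: per-page vs. per-bio checksum lookup.
 * Compile with: cc -std=c99 -o csum csum.c
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE   4096u
#define LEAF_SPAN   (16 * PAGE_SIZE)   /* offsets covered by one csum leaf */

static unsigned long lookups;          /* counts simulated btree searches */

/* stand-in for a btree search: returns the leaf covering 'offset' */
static uint64_t csum_leaf_lookup(uint64_t offset)
{
	lookups++;
	return offset / LEAF_SPAN;
}

/* old scheme: a fresh search for every page */
static void csum_per_page(uint64_t start, unsigned int pages)
{
	for (unsigned int i = 0; i < pages; i++)
		(void)csum_leaf_lookup(start + (uint64_t)i * PAGE_SIZE);
}

/* new scheme: keep the last leaf, reuse it while offsets stay inside it */
static void csum_per_bio(uint64_t start, unsigned int pages)
{
	uint64_t leaf = ~0ULL;

	for (unsigned int i = 0; i < pages; i++) {
		uint64_t off = start + (uint64_t)i * PAGE_SIZE;

		if (leaf == ~0ULL || off / LEAF_SPAN != leaf)
			leaf = csum_leaf_lookup(off);
		/* the checksum for 'off' would be read out of 'leaf' here */
	}
}

int main(void)
{
	csum_per_page(0, 256);             /* a 1MB streaming read */
	printf("per-page lookups: %lu\n", lookups);

	lookups = 0;
	csum_per_bio(0, 256);
	printf("per-bio lookups:  %lu\n", lookups);
	return 0;
}

On this simulated 1MB read the per-page path does 256 searches while the per-bio path does 16, one per leaf, which is the reuse the commit message describes for adjacent offsets.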
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--	fs/btrfs/disk-io.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e826730d750f..d2d1cc87e8ad 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1357,10 +1357,25 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	 */
 	btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
 	btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
+
+	/* a higher idle thresh on the submit workers makes it much more
+	 * likely that bios will be send down in a sane order to the
+	 * devices
+	 */
+	fs_info->submit_workers.idle_thresh = 64;
+
 	btrfs_init_workers(&fs_info->fixup_workers, 1);
 	btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
 	btrfs_init_workers(&fs_info->endio_write_workers,
 			   fs_info->thread_pool_size);
+
+	/*
+	 * endios are largely parallel and should have a very
+	 * low idle thresh
+	 */
+	fs_info->endio_workers.idle_thresh = 4;
+	fs_info->endio_write_workers.idle_thresh = 4;
+
 	btrfs_start_workers(&fs_info->workers, 1);
 	btrfs_start_workers(&fs_info->submit_workers, 1);
 	btrfs_start_workers(&fs_info->fixup_workers, 1);
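The values set above only matter when the queueing side consults them. Below is a minimal user-space sketch of that gating, assuming a worker with a pending-work count; the real decision lives in fs/btrfs/async-thread.c and differs in detail, so treat the names and structure here as hypothetical.

/*
 * Hypothetical sketch: idle_thresh gating fan-out in a work queue.
 * Compile with: cc -std=c99 -o thresh thresh.c
 */
#include <stdio.h>

struct worker {
	unsigned int pending;      /* queued work items */
	unsigned int idle_thresh;  /* considered busy past this count */
};

/* keep feeding the current worker while it still looks idle, else fan out */
static struct worker *select_worker(struct worker *cur, struct worker *spare)
{
	if (cur->pending < cur->idle_thresh)
		return cur;        /* below thresh: one thread, ordered IO */
	return spare;              /* busy: balance onto another thread */
}

int main(void)
{
	struct worker submit = { .pending = 32, .idle_thresh = 64 };
	struct worker endio  = { .pending = 8,  .idle_thresh = 4  };
	struct worker spare  = { .pending = 0,  .idle_thresh = 4  };

	/* large streaming bios mean few queue entries, so the high submit
	 * thresh keeps submission on one thread; endios spread out after
	 * only 4 entries */
	printf("submit goes to %s\n",
	       select_worker(&submit, &spare) == &submit ? "same thread"
							 : "helper");
	printf("endio  goes to %s\n",
	       select_worker(&endio, &spare) == &endio ? "same thread"
						       : "helper");
	return 0;
}

This is why the patch fixes the single-CPU-at-100% symptom: with a large bio per queue entry, a low threshold never triggered before device congestion throttling did, so a high submit threshold (64) preserves ordering while the low endio threshold (4) lets checksum work spill onto other CPUs quickly.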