about summary refs log tree commit diff stats
path: root/fs/btrfs/disk-io.c
diff options
context:
space:
mode:
author   Chris Mason <chris.mason@oracle.com>  2008-04-16 11:14:51 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:01 -0400
commit   44b8bd7edda4f63de180d0f7325c9fb704b3806b (patch)
tree     6090e8b70d6a726caed7d4662c311779d1927cf7  /fs/btrfs/disk-io.c
parent   321aecc65671ae8136bd2ca6879b56f0221f8ac8 (diff)
Btrfs: Create a work queue for bio writes
This allows checksumming to happen in parallel among many CPUs, and keeps us from bogging down pdflush with the checksumming code.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--  fs/btrfs/disk-io.c  93
1 file changed, 90 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 79c284c87286..9e41ea93ebce 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -47,6 +47,7 @@ static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
47 47
48static struct extent_io_ops btree_extent_io_ops; 48static struct extent_io_ops btree_extent_io_ops;
49static struct workqueue_struct *end_io_workqueue; 49static struct workqueue_struct *end_io_workqueue;
50static struct workqueue_struct *async_submit_workqueue;
50 51
51struct end_io_wq { 52struct end_io_wq {
52 struct bio *bio; 53 struct bio *bio;
@@ -58,6 +59,15 @@ struct end_io_wq {
58 struct list_head list; 59 struct list_head list;
59}; 60};
60 61
62struct async_submit_bio {
63 struct inode *inode;
64 struct bio *bio;
65 struct list_head list;
66 extent_submit_bio_hook_t *submit_bio_hook;
67 int rw;
68 int mirror_num;
69};
70
61struct extent_map *btree_get_extent(struct inode *inode, struct page *page, 71struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
62 size_t page_offset, u64 start, u64 len, 72 size_t page_offset, u64 start, u64 len,
63 int create) 73 int create)
@@ -365,7 +375,31 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
365 return 0; 375 return 0;
366} 376}
367 377
368static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 378int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
379 int rw, struct bio *bio, int mirror_num,
380 extent_submit_bio_hook_t *submit_bio_hook)
381{
382 struct async_submit_bio *async;
383
384 async = kmalloc(sizeof(*async), GFP_NOFS);
385 if (!async)
386 return -ENOMEM;
387
388 async->inode = inode;
389 async->rw = rw;
390 async->bio = bio;
391 async->mirror_num = mirror_num;
392 async->submit_bio_hook = submit_bio_hook;
393
394 spin_lock(&fs_info->async_submit_work_lock);
395 list_add_tail(&async->list, &fs_info->async_submit_work_list);
396 spin_unlock(&fs_info->async_submit_work_lock);
397
398 queue_work(async_submit_workqueue, &fs_info->async_submit_work);
399 return 0;
400}
401
402static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
369 int mirror_num) 403 int mirror_num)
370{ 404{
371 struct btrfs_root *root = BTRFS_I(inode)->root; 405 struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -389,6 +423,17 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
389 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num); 423 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num);
390} 424}
391 425
426static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
427 int mirror_num)
428{
429 if (!(rw & (1 << BIO_RW))) {
430 return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
431 }
432 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
433 inode, rw, bio, mirror_num,
434 __btree_submit_bio_hook);
435}
436
392static int btree_writepage(struct page *page, struct writeback_control *wbc) 437static int btree_writepage(struct page *page, struct writeback_control *wbc)
393{ 438{
394 struct extent_io_tree *tree; 439 struct extent_io_tree *tree;
@@ -903,9 +948,9 @@ static int bio_ready_for_csum(struct bio *bio)
903} 948}
904 949
905#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) 950#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
906void btrfs_end_io_csum(void *p) 951static void btrfs_end_io_csum(void *p)
907#else 952#else
908void btrfs_end_io_csum(struct work_struct *work) 953static void btrfs_end_io_csum(struct work_struct *work)
909#endif 954#endif
910{ 955{
911#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) 956#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
@@ -959,6 +1004,39 @@ void btrfs_end_io_csum(struct work_struct *work)
959 } 1004 }
960} 1005}
961 1006
1007#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1008static void btrfs_async_submit_work(void *p)
1009#else
1010static void btrfs_async_submit_work(struct work_struct *work)
1011#endif
1012{
1013#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1014 struct btrfs_fs_info *fs_info = p;
1015#else
1016 struct btrfs_fs_info *fs_info = container_of(work,
1017 struct btrfs_fs_info,
1018 async_submit_work);
1019#endif
1020 struct async_submit_bio *async;
1021 struct list_head *next;
1022
1023 while(1) {
1024 spin_lock(&fs_info->async_submit_work_lock);
1025 if (list_empty(&fs_info->async_submit_work_list)) {
1026 spin_unlock(&fs_info->async_submit_work_lock);
1027 return;
1028 }
1029 next = fs_info->async_submit_work_list.next;
1030 list_del(next);
1031 spin_unlock(&fs_info->async_submit_work_lock);
1032
1033 async = list_entry(next, struct async_submit_bio, list);
1034 async->submit_bio_hook(async->inode, async->rw, async->bio,
1035 async->mirror_num);
1036 kfree(async);
1037 }
1038}
1039
962struct btrfs_root *open_ctree(struct super_block *sb, 1040struct btrfs_root *open_ctree(struct super_block *sb,
963 struct btrfs_fs_devices *fs_devices) 1041 struct btrfs_fs_devices *fs_devices)
964{ 1042{
@@ -987,14 +1065,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
987 } 1065 }
988 end_io_workqueue = create_workqueue("btrfs-end-io"); 1066 end_io_workqueue = create_workqueue("btrfs-end-io");
989 BUG_ON(!end_io_workqueue); 1067 BUG_ON(!end_io_workqueue);
1068 async_submit_workqueue = create_workqueue("btrfs-async-submit");
990 1069
991 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS); 1070 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
992 INIT_LIST_HEAD(&fs_info->trans_list); 1071 INIT_LIST_HEAD(&fs_info->trans_list);
993 INIT_LIST_HEAD(&fs_info->dead_roots); 1072 INIT_LIST_HEAD(&fs_info->dead_roots);
994 INIT_LIST_HEAD(&fs_info->hashers); 1073 INIT_LIST_HEAD(&fs_info->hashers);
995 INIT_LIST_HEAD(&fs_info->end_io_work_list); 1074 INIT_LIST_HEAD(&fs_info->end_io_work_list);
1075 INIT_LIST_HEAD(&fs_info->async_submit_work_list);
996 spin_lock_init(&fs_info->hash_lock); 1076 spin_lock_init(&fs_info->hash_lock);
997 spin_lock_init(&fs_info->end_io_work_lock); 1077 spin_lock_init(&fs_info->end_io_work_lock);
1078 spin_lock_init(&fs_info->async_submit_work_lock);
998 spin_lock_init(&fs_info->delalloc_lock); 1079 spin_lock_init(&fs_info->delalloc_lock);
999 spin_lock_init(&fs_info->new_trans_lock); 1080 spin_lock_init(&fs_info->new_trans_lock);
1000 1081
@@ -1041,9 +1122,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1041 1122
1042#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18) 1123#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1043 INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info); 1124 INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum, fs_info);
1125 INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work,
1126 fs_info);
1044 INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info); 1127 INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
1045#else 1128#else
1046 INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum); 1129 INIT_WORK(&fs_info->end_io_work, btrfs_end_io_csum);
1130 INIT_WORK(&fs_info->async_submit_work, btrfs_async_submit_work);
1047 INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner); 1131 INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
1048#endif 1132#endif
1049 BTRFS_I(fs_info->btree_inode)->root = tree_root; 1133 BTRFS_I(fs_info->btree_inode)->root = tree_root;
@@ -1403,6 +1487,9 @@ int close_ctree(struct btrfs_root *root)
1403 flush_workqueue(end_io_workqueue); 1487 flush_workqueue(end_io_workqueue);
1404 destroy_workqueue(end_io_workqueue); 1488 destroy_workqueue(end_io_workqueue);
1405 1489
1490 flush_workqueue(async_submit_workqueue);
1491 destroy_workqueue(async_submit_workqueue);
1492
1406 iput(fs_info->btree_inode); 1493 iput(fs_info->btree_inode);
1407#if 0 1494#if 0
1408 while(!list_empty(&fs_info->hashers)) { 1495 while(!list_empty(&fs_info->hashers)) {