aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fusionio.com>2012-09-25 14:25:58 -0400
committerChris Mason <chris.mason@fusionio.com>2012-10-09 09:15:40 -0400
commitde0022b9da616b95ea5b41eab32da825b0b5150f (patch)
tree5a96323c250b846da82da82ed999828255be0e34 /fs/btrfs
parent221b831835421f9451182611fa25fa60f440662f (diff)
Btrfs: do not async metadata csumming in certain situations
There are a couple scenarios where farming metadata csumming off to an async thread doesn't help. The first is if our processor supports crc32c, in which case the csumming will be fast and so the overhead of the async model is not worth the cost. The other case is for our tree log. We will be making that stuff dirty and writing it out and waiting for it immediately. Even with software crc32c this gives me a ~15% increase in speed with O_SYNC workloads. Thanks, Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/disk-io.c22
-rw-r--r--fs/btrfs/extent_io.c14
-rw-r--r--fs/btrfs/extent_io.h1
3 files changed, 35 insertions, 2 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dcaf55695e6f..aa02eab8c40b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -46,6 +46,10 @@
46#include "check-integrity.h" 46#include "check-integrity.h"
47#include "rcu-string.h" 47#include "rcu-string.h"
48 48
49#ifdef CONFIG_X86
50#include <asm/cpufeature.h>
51#endif
52
49static struct extent_io_ops btree_extent_io_ops; 53static struct extent_io_ops btree_extent_io_ops;
50static void end_workqueue_fn(struct btrfs_work *work); 54static void end_workqueue_fn(struct btrfs_work *work);
51static void free_fs_root(struct btrfs_root *root); 55static void free_fs_root(struct btrfs_root *root);
@@ -859,10 +863,22 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
859 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); 863 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
860} 864}
861 865
866static int check_async_write(struct inode *inode, unsigned long bio_flags)
867{
868 if (bio_flags & EXTENT_BIO_TREE_LOG)
869 return 0;
870#ifdef CONFIG_X86
871 if (cpu_has_xmm4_2)
872 return 0;
873#endif
874 return 1;
875}
876
862static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, 877static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
863 int mirror_num, unsigned long bio_flags, 878 int mirror_num, unsigned long bio_flags,
864 u64 bio_offset) 879 u64 bio_offset)
865{ 880{
881 int async = check_async_write(inode, bio_flags);
866 int ret; 882 int ret;
867 883
868 if (!(rw & REQ_WRITE)) { 884 if (!(rw & REQ_WRITE)) {
@@ -877,6 +893,12 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
877 return ret; 893 return ret;
878 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, 894 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
879 mirror_num, 0); 895 mirror_num, 0);
896 } else if (!async) {
897 ret = btree_csum_one_bio(bio);
898 if (ret)
899 return ret;
900 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
901 mirror_num, 0);
880 } 902 }
881 903
882 /* 904 /*
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a2c21570adf5..979fa0d6bfee 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -45,6 +45,7 @@ struct extent_page_data {
45 struct bio *bio; 45 struct bio *bio;
46 struct extent_io_tree *tree; 46 struct extent_io_tree *tree;
47 get_extent_t *get_extent; 47 get_extent_t *get_extent;
48 unsigned long bio_flags;
48 49
49 /* tells writepage not to lock the state bits for this range 50 /* tells writepage not to lock the state bits for this range
50 * it still does the unlocking 51 * it still does the unlocking
@@ -3163,12 +3164,16 @@ static int write_one_eb(struct extent_buffer *eb,
3163 struct block_device *bdev = fs_info->fs_devices->latest_bdev; 3164 struct block_device *bdev = fs_info->fs_devices->latest_bdev;
3164 u64 offset = eb->start; 3165 u64 offset = eb->start;
3165 unsigned long i, num_pages; 3166 unsigned long i, num_pages;
3167 unsigned long bio_flags = 0;
3166 int rw = (epd->sync_io ? WRITE_SYNC : WRITE); 3168 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3167 int ret = 0; 3169 int ret = 0;
3168 3170
3169 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3171 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3170 num_pages = num_extent_pages(eb->start, eb->len); 3172 num_pages = num_extent_pages(eb->start, eb->len);
3171 atomic_set(&eb->io_pages, num_pages); 3173 atomic_set(&eb->io_pages, num_pages);
3174 if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
3175 bio_flags = EXTENT_BIO_TREE_LOG;
3176
3172 for (i = 0; i < num_pages; i++) { 3177 for (i = 0; i < num_pages; i++) {
3173 struct page *p = extent_buffer_page(eb, i); 3178 struct page *p = extent_buffer_page(eb, i);
3174 3179
@@ -3177,7 +3182,8 @@ static int write_one_eb(struct extent_buffer *eb,
3177 ret = submit_extent_page(rw, eb->tree, p, offset >> 9, 3182 ret = submit_extent_page(rw, eb->tree, p, offset >> 9,
3178 PAGE_CACHE_SIZE, 0, bdev, &epd->bio, 3183 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
3179 -1, end_bio_extent_buffer_writepage, 3184 -1, end_bio_extent_buffer_writepage,
3180 0, 0, 0); 3185 0, epd->bio_flags, bio_flags);
3186 epd->bio_flags = bio_flags;
3181 if (ret) { 3187 if (ret) {
3182 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3188 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3183 SetPageError(p); 3189 SetPageError(p);
@@ -3212,6 +3218,7 @@ int btree_write_cache_pages(struct address_space *mapping,
3212 .tree = tree, 3218 .tree = tree,
3213 .extent_locked = 0, 3219 .extent_locked = 0,
3214 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3220 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3221 .bio_flags = 0,
3215 }; 3222 };
3216 int ret = 0; 3223 int ret = 0;
3217 int done = 0; 3224 int done = 0;
@@ -3474,7 +3481,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
3474 if (epd->sync_io) 3481 if (epd->sync_io)
3475 rw = WRITE_SYNC; 3482 rw = WRITE_SYNC;
3476 3483
3477 ret = submit_one_bio(rw, epd->bio, 0, 0); 3484 ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
3478 BUG_ON(ret < 0); /* -ENOMEM */ 3485 BUG_ON(ret < 0); /* -ENOMEM */
3479 epd->bio = NULL; 3486 epd->bio = NULL;
3480 } 3487 }
@@ -3497,6 +3504,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
3497 .get_extent = get_extent, 3504 .get_extent = get_extent,
3498 .extent_locked = 0, 3505 .extent_locked = 0,
3499 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3506 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3507 .bio_flags = 0,
3500 }; 3508 };
3501 3509
3502 ret = __extent_writepage(page, wbc, &epd); 3510 ret = __extent_writepage(page, wbc, &epd);
@@ -3521,6 +3529,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
3521 .get_extent = get_extent, 3529 .get_extent = get_extent,
3522 .extent_locked = 1, 3530 .extent_locked = 1,
3523 .sync_io = mode == WB_SYNC_ALL, 3531 .sync_io = mode == WB_SYNC_ALL,
3532 .bio_flags = 0,
3524 }; 3533 };
3525 struct writeback_control wbc_writepages = { 3534 struct writeback_control wbc_writepages = {
3526 .sync_mode = mode, 3535 .sync_mode = mode,
@@ -3560,6 +3569,7 @@ int extent_writepages(struct extent_io_tree *tree,
3560 .get_extent = get_extent, 3569 .get_extent = get_extent,
3561 .extent_locked = 0, 3570 .extent_locked = 0,
3562 .sync_io = wbc->sync_mode == WB_SYNC_ALL, 3571 .sync_io = wbc->sync_mode == WB_SYNC_ALL,
3572 .bio_flags = 0,
3563 }; 3573 };
3564 3574
3565 ret = extent_write_cache_pages(tree, mapping, wbc, 3575 ret = extent_write_cache_pages(tree, mapping, wbc,
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 512f8da041f1..a69dea219044 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -27,6 +27,7 @@
27 * type for this bio 27 * type for this bio
28 */ 28 */
29#define EXTENT_BIO_COMPRESSED 1 29#define EXTENT_BIO_COMPRESSED 1
30#define EXTENT_BIO_TREE_LOG 2
30#define EXTENT_BIO_FLAG_SHIFT 16 31#define EXTENT_BIO_FLAG_SHIFT 16
31 32
32/* these are bit numbers for test/set bit */ 33/* these are bit numbers for test/set bit */