aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLiu Bo <bo.li.liu@oracle.com>2014-03-04 21:07:35 -0500
committerJosef Bacik <jbacik@fb.com>2014-03-10 15:17:19 -0400
commit2131bcd38b18167f499f190acf3409dfe5b3c280 (patch)
tree505aa099a705f6da3dec233c05fe0f827ff8cf84
parenta4d96d6254590df5eb9a6ac32434ed9d33a46d19 (diff)
Btrfs: add readahead for send_write
Btrfs send reads data from disk and then writes to a stream via pipe or a file via flush. Currently we read one page at a time, so every page results in a disk read, which is not friendly to disks, esp. HDD. Given that, performance can be gained by adding readahead for those pages. Here is a quick test:

    $ btrfs subvolume create send
    $ xfs_io -f -c "pwrite 0 1G" send/foobar
    $ btrfs subvolume snap -r send ro
    $ time "btrfs send ro -f /dev/null"

                 w/o           w
    real     1m37.527s    0m9.097s
    user      0m0.122s    0m0.086s
    sys      0m53.191s   0m12.857s

Signed-off-by: Liu Bo <bo.li.liu@oracle.com> Reviewed-by: David Sterba <dsterba@suse.cz> Signed-off-by: Josef Bacik <jbacik@fb.com>
-rw-r--r--fs/btrfs/send.c9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 112eb647b5cd..646369179697 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -124,6 +124,8 @@ struct send_ctx {
124 struct list_head name_cache_list; 124 struct list_head name_cache_list;
125 int name_cache_size; 125 int name_cache_size;
126 126
127 struct file_ra_state ra;
128
127 char *read_buf; 129 char *read_buf;
128 130
129 /* 131 /*
@@ -4170,6 +4172,13 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4170 goto out; 4172 goto out;
4171 4173
4172 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; 4174 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;
4175
4176 /* initial readahead */
4177 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4178 file_ra_state_init(&sctx->ra, inode->i_mapping);
4179 btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
4180 last_index - index + 1);
4181
4173 while (index <= last_index) { 4182 while (index <= last_index) {
4174 unsigned cur_len = min_t(unsigned, len, 4183 unsigned cur_len = min_t(unsigned, len,
4175 PAGE_CACHE_SIZE - pg_offset); 4184 PAGE_CACHE_SIZE - pg_offset);