diff options
author | Tahsin Erdogan <tahsin@google.com> | 2017-08-06 00:07:01 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2017-08-06 00:07:01 -0400 |
commit | 9699d4f91d9bd2f70dcc37afe3c9f18145ab2dba (patch) | |
tree | 8fc510c6e2da691b84598b8b2cb13aeb92d00288 /fs/ext4/xattr.c | |
parent | ec00022030da5761518476096626338bd67df57a (diff) |
ext4: make xattr inode reads faster
ext4_xattr_inode_read() currently reads each block sequentially while
waiting for the I/O operation to complete before moving on to the next
block. This prevents request merging in the block layer.
Add an ext4_bread_batch() function that starts reads for all blocks,
then optionally waits for them to complete. Similar logic is used
in ext4_find_entry(), so update that code to use the new function.
Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/xattr.c')
-rw-r--r-- | fs/ext4/xattr.c | 51 |
1 files changed, 32 insertions, 19 deletions
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 4025666c5991..5fa912e5d2a6 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
@@ -317,28 +317,41 @@ static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash) | |||
317 | */ | 317 | */ |
318 | static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size) | 318 | static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size) |
319 | { | 319 | { |
320 | unsigned long block = 0; | 320 | int blocksize = 1 << ea_inode->i_blkbits; |
321 | struct buffer_head *bh; | 321 | int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits; |
322 | int blocksize = ea_inode->i_sb->s_blocksize; | 322 | int tail_size = (size % blocksize) ?: blocksize; |
323 | size_t csize, copied = 0; | 323 | struct buffer_head *bhs_inline[8]; |
324 | void *copy_pos = buf; | 324 | struct buffer_head **bhs = bhs_inline; |
325 | 325 | int i, ret; | |
326 | while (copied < size) { | 326 | |
327 | csize = (size - copied) > blocksize ? blocksize : size - copied; | 327 | if (bh_count > ARRAY_SIZE(bhs_inline)) { |
328 | bh = ext4_bread(NULL, ea_inode, block, 0); | 328 | bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS); |
329 | if (IS_ERR(bh)) | 329 | if (!bhs) |
330 | return PTR_ERR(bh); | 330 | return -ENOMEM; |
331 | if (!bh) | 331 | } |
332 | return -EFSCORRUPTED; | ||
333 | 332 | ||
334 | memcpy(copy_pos, bh->b_data, csize); | 333 | ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count, |
335 | brelse(bh); | 334 | true /* wait */, bhs); |
335 | if (ret) | ||
336 | goto free_bhs; | ||
336 | 337 | ||
337 | copy_pos += csize; | 338 | for (i = 0; i < bh_count; i++) { |
338 | block += 1; | 339 | /* There shouldn't be any holes in ea_inode. */ |
339 | copied += csize; | 340 | if (!bhs[i]) { |
341 | ret = -EFSCORRUPTED; | ||
342 | goto put_bhs; | ||
343 | } | ||
344 | memcpy((char *)buf + blocksize * i, bhs[i]->b_data, | ||
345 | i < bh_count - 1 ? blocksize : tail_size); | ||
340 | } | 346 | } |
341 | return 0; | 347 | ret = 0; |
348 | put_bhs: | ||
349 | for (i = 0; i < bh_count; i++) | ||
350 | brelse(bhs[i]); | ||
351 | free_bhs: | ||
352 | if (bhs != bhs_inline) | ||
353 | kfree(bhs); | ||
354 | return ret; | ||
342 | } | 355 | } |
343 | 356 | ||
344 | static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, | 357 | static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino, |